From 05ed1b544f2853ae0054cd22bd15e623abbb3aa9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 8 Aug 2022 15:41:16 +0200 Subject: [PATCH 001/421] Working base for reinforcement learning model --- docs/freqai.md | 4 +- freqtrade/constants.py | 5 +- freqtrade/freqai/data_drawer.py | 16 +- .../ReinforcementLearningExample.py | 147 +++++++++++ freqtrade/freqai/freqai_interface.py | 2 +- .../RL/RLPrediction_agent.py | 162 ++++++++++++ .../prediction_models/RL/RLPrediction_env.py | 230 ++++++++++++++++++ .../freqai/prediction_models/RL/config.py | 37 +++ .../ReinforcementLearningModel.py | 157 ++++++++++++ 9 files changed, 748 insertions(+), 12 deletions(-) create mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample.py create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env.py create mode 100644 freqtrade/freqai/prediction_models/RL/config.py create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningModel.py diff --git a/docs/freqai.md b/docs/freqai.md index bba6faaea..032046882 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -123,8 +123,8 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float.
| `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float. |
| **Extraneous parameters**
-| `keras` | If your model makes use of Keras (typical for Tensorflow-based prediction models), activate this flag so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. Default: `False`.
-| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. Default: 2.
+| `keras` | If your model makes use of Keras (typical of TensorFlow-based prediction models), activate this flag so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. Default: `False`.
+| `conv_width` | The width of a convolutional neural network input tensor or the `ReinforcementLearningModel` `window_size`. This replaces the need for `shift` by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. Default: 2. <br>
**Datatype:** integer. ### Important dataframe key patterns diff --git a/freqtrade/constants.py b/freqtrade/constants.py index ddbc84fa9..4d1891165 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -520,10 +520,7 @@ CONF_SCHEMA = { }, }, "model_training_parameters": { - "type": "object", - "properties": { - "n_estimators": {"type": "integer", "default": 1000} - }, + "type": "object" }, }, "required": [ diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index b3060deff..5282b4f59 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -390,10 +390,13 @@ class FreqaiDataDrawer: save_path = Path(dk.data_path) # Save the trained model - if not dk.keras: + model_type = self.freqai_info.get('model_save_type', 'joblib') + if model_type == 'joblib': dump(model, save_path / f"{dk.model_filename}_model.joblib") - else: + elif model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") + elif model_type == 'stable_baselines': + model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib") @@ -459,15 +462,18 @@ class FreqaiDataDrawer: dk.data_path / f"{dk.model_filename}_trained_df.pkl" ) + model_type = self.freqai_info.get('model_save_type', 'joblib') # try to access model in memory instead of loading object from disk to save time if dk.live and coin in self.model_dictionary: model = self.model_dictionary[coin] - elif not dk.keras: + elif model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") - else: + elif model_type == 'keras': from tensorflow import keras - model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") + elif model_type == 'stable_baselines': + from stable_baselines3.ppo.ppo import PPO + model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample.py new file mode 100644 index 000000000..1bafdbb80 --- /dev/null +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample.py @@ -0,0 +1,147 @@ +import logging +from functools import reduce + +import pandas as pd +import talib.abstract as ta +from pandas import DataFrame + +from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningExample(IStrategy): + """ + Test strategy - used for testing freqAI functionalities. + DO not use in production. 
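+    Intended to be used together with the ReinforcementLearningModel prediction
+    model; note that the raw price feature (%-{coin}raw_price) added below is
+    currently required by the RL models.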
+ """ + + minimal_roi = {"0": 0.1, "240": -1} + + plot_config = { + "main_plot": {}, + "subplots": { + "prediction": {"prediction": {"color": "blue"}}, + "target_roi": { + "target_roi": {"color": "brown"}, + }, + "do_predict": { + "do_predict": {"color": "brown"}, + }, + }, + } + + process_only_new_candles = True + stoploss = -0.05 + use_exit_signal = True + startup_candle_count: int = 300 + can_short = False + + linear_roi_offset = DecimalParameter( + 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True + ) + max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) + + def informative_pairs(self): + whitelist_pairs = self.dp.current_whitelist() + corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] + informative_pairs = [] + for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: + for pair in whitelist_pairs: + informative_pairs.append((pair, tf)) + for pair in corr_pairs: + if pair in whitelist_pairs: + continue # avoid duplication + informative_pairs.append((pair, tf)) + return informative_pairs + + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + with self.freqai.lock: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] + + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). 
Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 + + return df + + def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + + self.freqai_info = self.config["freqai"] + + dataframe = self.freqai.start(dataframe, metadata, self) + + return dataframe + + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 4106f24e0..b6fde9357 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -657,7 +657,7 @@ class IFreqaiModel(ABC): """ @abstractmethod - def fit(self, data_dictionary: Dict[str, Any]) -> Any: + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: """ Most regressors use the same function names and arguments e.g. user can drop in LGBMRegressor in place of CatBoostRegressor and all data diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py new file mode 100644 index 000000000..acea025c0 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py @@ -0,0 +1,162 @@ +# common library + +import numpy as np +from stable_baselines3 import A2C +from stable_baselines3 import DDPG +from stable_baselines3 import PPO +from stable_baselines3 import SAC +from stable_baselines3 import TD3 +from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.noise import NormalActionNoise +from stable_baselines3.common.noise import OrnsteinUhlenbeckActionNoise +# from stable_baselines3.common.vec_env import DummyVecEnv + +from freqtrade.freqai.prediction_models.RL import config +# from meta.env_stock_trading.env_stock_trading import StockTradingEnv + +# RL models from stable-baselines + + +MODELS = {"a2c": A2C, "ddpg": DDPG, "td3": TD3, "sac": SAC, "ppo": PPO} + + +MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()} + + +NOISE = { + "normal": NormalActionNoise, + "ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise, +} + + +class TensorboardCallback(BaseCallback): + """ + Custom callback for plotting additional values in tensorboard. 
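+    Currently it records the per-step environment reward under the "train/reward" key.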
+ """ + + def __init__(self, verbose=0): + super(TensorboardCallback, self).__init__(verbose) + + def _on_step(self) -> bool: + try: + self.logger.record(key="train/reward", value=self.locals["rewards"][0]) + except BaseException: + self.logger.record(key="train/reward", value=self.locals["reward"][0]) + return True + + +class RLPrediction_agent: + """Provides implementations for DRL algorithms + Based on: + https://github.com/AI4Finance-Foundation/FinRL-Meta/blob/master/agents/stablebaselines3_models.py + Attributes + ---------- + env: gym environment class + user-defined class + + Methods + ------- + get_model() + setup DRL algorithms + train_model() + train DRL algorithms in a train dataset + and output the trained model + DRL_prediction() + make a prediction in a test dataset and get results + """ + + def __init__(self, env): + self.env = env + + def get_model( + self, + model_name, + policy="MlpPolicy", + policy_kwargs=None, + model_kwargs=None, + verbose=1, + seed=None, + ): + if model_name not in MODELS: + raise NotImplementedError("NotImplementedError") + + if model_kwargs is None: + model_kwargs = MODEL_KWARGS[model_name] + + if "action_noise" in model_kwargs: + n_actions = self.env.action_space.shape[-1] + model_kwargs["action_noise"] = NOISE[model_kwargs["action_noise"]]( + mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions) + ) + print(model_kwargs) + model = MODELS[model_name]( + policy=policy, + env=self.env, + tensorboard_log=f"{config.TENSORBOARD_LOG_DIR}/{model_name}", + verbose=verbose, + policy_kwargs=policy_kwargs, + seed=seed, + **model_kwargs, + ) + return model + + def train_model(self, model, tb_log_name, total_timesteps=5000): + model = model.learn( + total_timesteps=total_timesteps, + tb_log_name=tb_log_name, + callback=TensorboardCallback(), + ) + return model + + @staticmethod + def DRL_prediction(model, environment): + test_env, test_obs = environment.get_sb_env() + """make a prediction""" + account_memory = [] + actions_memory = [] + test_env.reset() + for i in range(len(environment.df.index.unique())): + action, _states = model.predict(test_obs) + # account_memory = test_env.env_method(method_name="save_asset_memory") + # actions_memory = test_env.env_method(method_name="save_action_memory") + test_obs, rewards, dones, info = test_env.step(action) + if i == (len(environment.df.index.unique()) - 2): + account_memory = test_env.env_method(method_name="save_asset_memory") + actions_memory = test_env.env_method(method_name="save_action_memory") + if dones[0]: + print("hit end!") + break + return account_memory[0], actions_memory[0] + + @staticmethod + def DRL_prediction_load_from_file(model_name, environment, cwd): + if model_name not in MODELS: + raise NotImplementedError("NotImplementedError") + try: + # load agent + model = MODELS[model_name].load(cwd) + print("Successfully load model", cwd) + except BaseException: + raise ValueError("Fail to load agent!") + + # test on the testing env + state = environment.reset() + episode_returns = list() # the cumulative_return / initial_account + episode_total_assets = list() + episode_total_assets.append(environment.initial_total_asset) + done = False + while not done: + action = model.predict(state)[0] + state, reward, done, _ = environment.step(action) + + total_asset = ( + environment.cash + + (environment.price_array[environment.time] * environment.stocks).sum() + ) + episode_total_assets.append(total_asset) + episode_return = total_asset / environment.initial_total_asset + episode_returns.append(episode_return) 
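+            # episode_total_assets / episode_returns track the account value and the
+            # cumulative return relative to the initial account at every step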
+ + print("episode_return", episode_return) + print("Test Finished!") + return episode_total_assets diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py new file mode 100644 index 000000000..5fef7fbed --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py @@ -0,0 +1,230 @@ +from enum import Enum + +import gym +import matplotlib.pyplot as plt +import numpy as np +from gym import spaces +from gym.utils import seeding + + +class Actions(Enum): + Hold = 0 + Buy = 1 + Sell = 2 + + +class Positions(Enum): + Short = 0 + Long = 1 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +class GymAnytrading(gym.Env): + """ + Based on https://github.com/AminHP/gym-anytrading + """ + + metadata = {'render.modes': ['human']} + + def __init__(self, signal_features, prices, window_size, fee=0.0): + assert signal_features.ndim == 2 + + self.seed() + self.signal_features = signal_features + self.prices = prices + self.window_size = window_size + self.fee = fee + self.shape = (window_size, self.signal_features.shape[1]) + + # spaces + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = None + self._position_history = None + self._total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + self._done = False + self._current_tick = self._start_tick + self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Short + self._position_history = (self.window_size * [None]) + [self._position] + self._total_reward = 0. + self._total_profit = 1. 
# unit + self._first_rendering = True + self.history = {} + return self._get_observation() + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + step_reward = self._calculate_reward(action) + self._total_reward += step_reward + + self._update_profit(action) + + trade = False + if ((action == Actions.Buy.value and self._position == Positions.Short) or + (action == Actions.Sell.value and self._position == Positions.Long)): + trade = True + + if trade: + self._position = self._position.opposite() + self._last_trade_tick = self._current_tick + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + total_reward=self._total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def render(self, mode='human'): + def _plot_position(position, tick): + color = None + if position == Positions.Short: + color = 'red' + elif position == Positions.Long: + color = 'green' + if color: + plt.scatter(tick, self.prices[tick], color=color) + + if self._first_rendering: + self._first_rendering = False + plt.cla() + plt.plot(self.prices) + start_position = self._position_history[self._start_tick] + _plot_position(start_position, self._start_tick) + + _plot_position(self._position, self._current_tick) + + plt.suptitle( + "Total Reward: %.6f" % self._total_reward + ' ~ ' + + "Total Profit: %.6f" % self._total_profit + ) + + plt.pause(0.01) + + def render_all(self, mode='human'): + window_ticks = np.arange(len(self._position_history)) + plt.plot(self.prices) + + short_ticks = [] + long_ticks = [] + for i, tick in enumerate(window_ticks): + if self._position_history[i] == Positions.Short: + short_ticks.append(tick) + elif self._position_history[i] == Positions.Long: + long_ticks.append(tick) + + plt.plot(short_ticks, self.prices[short_ticks], 'ro') + plt.plot(long_ticks, self.prices[long_ticks], 'go') + + plt.suptitle( + "Total Reward: %.6f" % self._total_reward + ' ~ ' + + "Total Profit: %.6f" % self._total_profit + ) + + def close(self): + plt.close() + + def save_rendering(self, filepath): + plt.savefig(filepath) + + def pause_rendering(self): + plt.show() + + def _calculate_reward(self, action): + step_reward = 0 + + trade = False + if ((action == Actions.Buy.value and self._position == Positions.Short) or + (action == Actions.Sell.value and self._position == Positions.Long)): + trade = True + + if trade: + current_price = self.prices[self._current_tick] + last_trade_price = self.prices[self._last_trade_tick] + price_diff = current_price - last_trade_price + + if self._position == Positions.Long: + step_reward += price_diff + + return step_reward + + def _update_profit(self, action): + trade = False + if ((action == Actions.Buy.value and self._position == Positions.Short) or + (action == Actions.Sell.value and self._position == Positions.Long)): + trade = True + + if trade or self._done: + current_price = self.prices[self._current_tick] + last_trade_price = self.prices[self._last_trade_tick] + + if self._position == Positions.Long: + shares = (self._total_profit * (1 - 
self.fee)) / last_trade_price + self._total_profit = (shares * (1 - self.fee)) * current_price + + def max_possible_profit(self): + current_tick = self._start_tick + last_trade_tick = current_tick - 1 + profit = 1. + + while current_tick <= self._end_tick: + position = None + if self.prices[current_tick] < self.prices[current_tick - 1]: + while (current_tick <= self._end_tick and + self.prices[current_tick] < self.prices[current_tick - 1]): + current_tick += 1 + position = Positions.Short + else: + while (current_tick <= self._end_tick and + self.prices[current_tick] >= self.prices[current_tick - 1]): + current_tick += 1 + position = Positions.Long + + if position == Positions.Long: + current_price = self.prices[current_tick - 1] + last_trade_price = self.prices[last_trade_tick] + shares = profit / last_trade_price + profit = shares * current_price + last_trade_tick = current_tick - 1 + print(profit) + + return profit diff --git a/freqtrade/freqai/prediction_models/RL/config.py b/freqtrade/freqai/prediction_models/RL/config.py new file mode 100644 index 000000000..c45eb2387 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/config.py @@ -0,0 +1,37 @@ +# dir +DATA_SAVE_DIR = "datasets" +TRAINED_MODEL_DIR = "trained_models" +TENSORBOARD_LOG_DIR = "tensorboard_log" +RESULTS_DIR = "results" + +# Model Parameters +A2C_PARAMS = {"n_steps": 5, "ent_coef": 0.01, "learning_rate": 0.0007} +PPO_PARAMS = { + "n_steps": 2048, + "ent_coef": 0.01, + "learning_rate": 0.00025, + "batch_size": 64, +} +DDPG_PARAMS = {"batch_size": 128, "buffer_size": 50000, "learning_rate": 0.001} +TD3_PARAMS = { + "batch_size": 100, + "buffer_size": 1000000, + "learning_rate": 0.001, +} +SAC_PARAMS = { + "batch_size": 64, + "buffer_size": 100000, + "learning_rate": 0.0001, + "learning_starts": 100, + "ent_coef": "auto_0.1", +} +ERL_PARAMS = { + "learning_rate": 3e-5, + "batch_size": 2048, + "gamma": 0.985, + "seed": 312, + "net_dimension": 512, + "target_step": 5000, + "eval_gap": 30, +} +RLlib_PARAMS = {"lr": 5e-5, "train_batch_size": 500, "gamma": 0.99} diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py b/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py new file mode 100644 index 000000000..dded1ac3b --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py @@ -0,0 +1,157 @@ +import logging +from typing import Any, Tuple, Dict +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from pandas import DataFrame +import pandas as pd +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import numpy as np +import numpy.typing as npt +from freqtrade.freqai.freqai_interface import IFreqaiModel + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
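+        :param dk: The FreqaiDataKitchen object for the current pair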
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + train_df = data_dictionary["train_features"] + + sep = '/' + coin = pair.split(sep, 1)[0] + price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + price.reset_index(inplace=True, drop=True) + + model_name = 'ppo' + + env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + + agent_params = self.freqai_info['model_training_parameters'] + total_timesteps = agent_params.get('total_timesteps', 1000) + + agent = RLPrediction_agent(env_instance) + + model = agent.get_model(model_name, model_kwargs=agent_params) + trained_model = agent.train_model(model=model, + tb_log_name=model_name, + total_timesteps=total_timesteps) + print('Training finished!') + + return trained_model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
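+        :param dk: The FreqaiDataKitchen object for the current pair
+        :param first: boolean = whether this is the first prediction or not (currently unused here)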
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 From c1e7db31306665e5090c667d23e4158cacd2b5c3 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 8 Aug 2022 19:04:53 +0300 Subject: [PATCH 002/421] ReinforcementLearningModel --- .../ReinforcementLearning.py | 157 ++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py new file mode 100644 index 000000000..dded1ac3b --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -0,0 +1,157 @@ +import logging +from typing import Any, Tuple, Dict +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from pandas import DataFrame +import pandas as pd +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import numpy as np +import numpy.typing as npt +from freqtrade.freqai.freqai_interface import IFreqaiModel + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + train_df = data_dictionary["train_features"] + + sep = '/' + coin = pair.split(sep, 1)[0] + price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + price.reset_index(inplace=True, drop=True) + + model_name = 'ppo' + + env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + + agent_params = self.freqai_info['model_training_parameters'] + total_timesteps = agent_params.get('total_timesteps', 1000) + + agent = RLPrediction_agent(env_instance) + + model = agent.get_model(model_name, model_kwargs=agent_params) + trained_model = agent.train_model(model=model, + tb_log_name=model_name, + total_timesteps=total_timesteps) + print('Training finished!') + + return trained_model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 From 2f4d73eb068419eebaef685f2d11b7a3841880d9 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 8 Aug 2022 19:10:03 +0300 Subject: [PATCH 003/421] Revert "ReinforcementLearningModel" This reverts commit 4d8dfe1ff1daa47276eda77118ddf39c13512a85. --- .../ReinforcementLearning.py | 157 ------------------ 1 file changed, 157 deletions(-) delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py deleted file mode 100644 index dded1ac3b..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ /dev/null @@ -1,157 +0,0 @@ -import logging -from typing import Any, Tuple, Dict -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from pandas import DataFrame -import pandas as pd -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import numpy as np -import numpy.typing as npt -from freqtrade.freqai.freqai_interface import IFreqaiModel - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningModel(IFreqaiModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen - ) -> Any: - """ - Filter the training data and train a model to it. Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. 
- :param unfiltered_dataframe: Full dataframe for the current training period - :param metadata: pair metadata from strategy. - :returns: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info("--------------------Starting training " f"{pair} --------------------") - - # filter the features requested by user in the configuration file and elegantly handle NaNs - features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( - features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods - - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" - ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - - model = self.fit(data_dictionary, pair) - - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - - logger.info(f"--------------------done training {pair}--------------------") - - return model - - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - - train_df = data_dictionary["train_features"] - - sep = '/' - coin = pair.split(sep, 1)[0] - price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - price.reset_index(inplace=True, drop=True) - - model_name = 'ppo' - - env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) - - agent_params = self.freqai_info['model_training_parameters'] - total_timesteps = agent_params.get('total_timesteps', 1000) - - agent = RLPrediction_agent(env_instance) - - model = agent.get_model(model_name, model_kwargs=agent_params) - trained_model = agent.train_model(model=model, - tb_log_name=model_name, - total_timesteps=total_timesteps) - print('Training finished!') - - return trained_model - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False - ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :pred_df: dataframe containing the predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) - pred_df.fillna(0, inplace=True) - - return (pred_df, dk.do_predict) - - def rl_model_predict(self, dataframe: DataFrame, - dk: FreqaiDataKitchen, model: Any) -> DataFrame: - - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) - - def _predict(window): - observations = dataframe.iloc[window.index] - res, _ = model.predict(observations, deterministic=True) - return res - - output = output.rolling(window=self.CONV_WIDTH).apply(_predict) - - return output - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 From ec813434f5ab40094c489498fc6eff32aa5cc923 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 8 Aug 2022 19:12:49 +0300 Subject: [PATCH 004/421] ReinforcementLearningModel --- .../ReinforcementLearning.py | 157 ++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py new file mode 100644 index 000000000..dded1ac3b --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -0,0 +1,157 @@ +import logging +from typing import Any, Tuple, Dict +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from pandas import DataFrame +import pandas as pd +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import numpy as np +import numpy.typing as npt +from freqtrade.freqai.freqai_interface import IFreqaiModel + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + train_df = data_dictionary["train_features"] + + sep = '/' + coin = pair.split(sep, 1)[0] + price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + price.reset_index(inplace=True, drop=True) + + model_name = 'ppo' + + env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + + agent_params = self.freqai_info['model_training_parameters'] + total_timesteps = agent_params.get('total_timesteps', 1000) + + agent = RLPrediction_agent(env_instance) + + model = agent.get_model(model_name, model_kwargs=agent_params) + trained_model = agent.train_model(model=model, + tb_log_name=model_name, + total_timesteps=total_timesteps) + print('Training finished!') + + return trained_model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 From 8eeaab27467fa2e0bdc7314bdb888998bbb20af8 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Fri, 12 Aug 2022 20:25:13 +0300 Subject: [PATCH 005/421] add reward function --- .../RL/RLPrediction_agent.py | 89 +-- .../prediction_models/RL/RLPrediction_env.py | 615 +++++++++++++++--- .../ReinforcementLearning.py | 72 +- .../ReinforcementLearningModel.py | 157 ----- 4 files changed, 597 insertions(+), 336 deletions(-) delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningModel.py diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py index acea025c0..2e271bd02 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py @@ -1,17 +1,15 @@ # common library import numpy as np -from stable_baselines3 import A2C -from stable_baselines3 import DDPG -from stable_baselines3 import PPO -from stable_baselines3 import SAC -from stable_baselines3 import TD3 -from stable_baselines3.common.callbacks import BaseCallback -from stable_baselines3.common.noise import NormalActionNoise -from stable_baselines3.common.noise import OrnsteinUhlenbeckActionNoise -# from stable_baselines3.common.vec_env import DummyVecEnv +from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3 +from stable_baselines3.common.callbacks import BaseCallback, EvalCallback +from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise from freqtrade.freqai.prediction_models.RL import config + + +# from stable_baselines3.common.vec_env import DummyVecEnv + # 
from meta.env_stock_trading.env_stock_trading import StockTradingEnv # RL models from stable-baselines @@ -74,8 +72,10 @@ class RLPrediction_agent: policy="MlpPolicy", policy_kwargs=None, model_kwargs=None, + reward_kwargs=None, + #total_timesteps=None, verbose=1, - seed=None, + seed=None ): if model_name not in MODELS: raise NotImplementedError("NotImplementedError") @@ -95,68 +95,23 @@ class RLPrediction_agent: tensorboard_log=f"{config.TENSORBOARD_LOG_DIR}/{model_name}", verbose=verbose, policy_kwargs=policy_kwargs, - seed=seed, - **model_kwargs, + #model_kwargs=model_kwargs, + #total_timesteps=model_kwargs["total_timesteps"], + seed=seed + #**model_kwargs, ) + + + + return model - def train_model(self, model, tb_log_name, total_timesteps=5000): + def train_model(self, model, tb_log_name, model_kwargs): + model = model.learn( - total_timesteps=total_timesteps, + total_timesteps=model_kwargs["total_timesteps"], tb_log_name=tb_log_name, + #callback=eval_callback, callback=TensorboardCallback(), ) return model - - @staticmethod - def DRL_prediction(model, environment): - test_env, test_obs = environment.get_sb_env() - """make a prediction""" - account_memory = [] - actions_memory = [] - test_env.reset() - for i in range(len(environment.df.index.unique())): - action, _states = model.predict(test_obs) - # account_memory = test_env.env_method(method_name="save_asset_memory") - # actions_memory = test_env.env_method(method_name="save_action_memory") - test_obs, rewards, dones, info = test_env.step(action) - if i == (len(environment.df.index.unique()) - 2): - account_memory = test_env.env_method(method_name="save_asset_memory") - actions_memory = test_env.env_method(method_name="save_action_memory") - if dones[0]: - print("hit end!") - break - return account_memory[0], actions_memory[0] - - @staticmethod - def DRL_prediction_load_from_file(model_name, environment, cwd): - if model_name not in MODELS: - raise NotImplementedError("NotImplementedError") - try: - # load agent - model = MODELS[model_name].load(cwd) - print("Successfully load model", cwd) - except BaseException: - raise ValueError("Fail to load agent!") - - # test on the testing env - state = environment.reset() - episode_returns = list() # the cumulative_return / initial_account - episode_total_assets = list() - episode_total_assets.append(environment.initial_total_asset) - done = False - while not done: - action = model.predict(state)[0] - state, reward, done, _ = environment.step(action) - - total_asset = ( - environment.cash - + (environment.price_array[environment.time] * environment.stocks).sum() - ) - episode_total_assets.append(total_asset) - episode_return = total_asset / environment.initial_total_asset - episode_returns.append(episode_return) - - print("episode_return", episode_return) - print("Test Finished!") - return episode_total_assets diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py index 5fef7fbed..2bc7e868f 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py @@ -1,47 +1,82 @@ +import logging +import random +from collections import deque from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import gym -import matplotlib.pyplot as plt +import matplotlib.pylab as plt import numpy as np +import pandas as pd from gym import spaces from gym.utils import seeding +from sklearn.decomposition import PCA, KernelPCA 
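+# Candlestick trading environment (long/short/neutral positions, simple fee and
+# profit accounting) used by the reinforcement learning prediction model.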
+logger = logging.getLogger(__name__) + +# from bokeh.io import output_notebook +# from bokeh.plotting import figure, show +# from bokeh.models import ( +# CustomJS, +# ColumnDataSource, +# NumeralTickFormatter, +# Span, +# HoverTool, +# Range1d, +# DatetimeTickFormatter, +# Scatter, +# Label, LabelSet +# ) + class Actions(Enum): - Hold = 0 - Buy = 1 - Sell = 2 + Short = 0 + Long = 1 + Neutral = 2 + +class Actions_v2(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 class Positions(Enum): Short = 0 Long = 1 + Neutral = 0.5 def opposite(self): return Positions.Short if self == Positions.Long else Positions.Long +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 -class GymAnytrading(gym.Env): - """ - Based on https://github.com/AminHP/gym-anytrading - """ +class DEnv(gym.Env): metadata = {'render.modes': ['human']} - def __init__(self, signal_features, prices, window_size, fee=0.0): - assert signal_features.ndim == 2 + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 self.seed() - self.signal_features = signal_features + self.df = df + self.signal_features = self.df self.prices = prices self.window_size = window_size - self.fee = fee - self.shape = (window_size, self.signal_features.shape[1]) + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] - # spaces - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + self.fee=0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions_v2)) + self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) # episode self._start_tick = self.window_size @@ -49,29 +84,56 @@ class GymAnytrading(gym.Env): self._done = None self._current_tick = None self._last_trade_tick = None - self._position = None + self._position = Positions.Neutral self._position_history = None - self._total_reward = None + self.total_reward = None self._total_profit = None self._first_rendering = None self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] + def reset(self): + self._done = False + + if self.starting_point == True: + self._position_history = (self._start_tick* [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + self._current_tick = self._start_tick - self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Short - self._position_history = (self.window_size * [None]) + [self._position] - self._total_reward = 0. + self._last_trade_tick = None + #self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Neutral + + self.total_reward = 0. self._total_profit = 1. # unit self._first_rendering = True self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. 
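+        # per-episode bookkeeping: closed-trade profits and the returns report are rebuilt on every reset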
+ + self.returns_report = [] + return self._get_observation() + def step(self, action): self._done = False self._current_tick += 1 @@ -79,34 +141,168 @@ class GymAnytrading(gym.Env): if self._current_tick == self._end_tick: self._done = True - step_reward = self._calculate_reward(action) - self._total_reward += step_reward + self.update_portfolio_log_returns(action) self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward - trade = False - if ((action == Actions.Buy.value and self._position == Positions.Short) or - (action == Actions.Sell.value and self._position == Positions.Long)): - trade = True - if trade: - self._position = self._position.opposite() + + + + trade_type = None + if self.is_tradesignal_v2(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + + temp_position = self._position + if action == Actions_v2.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Long_buy.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions_v2.Short_buy.value: + self._position = Positions.Short + trade_type = "short" + elif action == Actions_v2.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + else: + print("case not defined") + + # Update last trade tick self._last_trade_tick = self._current_tick + if trade_type != None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + self._position_history.append(self._position) observation = self._get_observation() info = dict( - total_reward=self._total_reward, - total_profit=self._total_profit, - position=self._position.value + tick = self._current_tick, + total_reward = self.total_reward, + total_profit = self._total_profit, + position = self._position.value ) self._update_history(info) return observation, step_reward, self._done, info + + def processState(self, state): + return state.to_numpy() + + def convert_mlp_Policy(self, obs_): + pass + def _get_observation(self): return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick == None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price)/last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price)/last_trade_price + else: + return 0. 
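+            # fallback for an unknown position state: treat it as no unrealized profit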
+ + + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_tradesignal_v2(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or + + (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or + (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) + + + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def _is_trade_v2(self, action: Actions_v2): + return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Short) or + + (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) + ) + + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_hold_v2(self, action): + return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) + or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) + + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + def _update_history(self, info): if not self.history: self.history = {key: [] for key in info.keys()} @@ -114,7 +310,9 @@ class GymAnytrading(gym.Env): for key, value in info.items(): self.history[key].append(value) + def render(self, mode='human'): + def _plot_position(position, tick): color = None if position == Positions.Short: @@ -122,7 +320,7 @@ class GymAnytrading(gym.Env): elif position == Positions.Long: color = 'green' if color: - plt.scatter(tick, self.prices[tick], color=color) + plt.scatter(tick, self.prices.loc[tick].open, color=color) if self._first_rendering: self._first_rendering = False @@ -131,100 +329,319 @@ class GymAnytrading(gym.Env): 
start_position = self._position_history[self._start_tick] _plot_position(start_position, self._start_tick) + plt.cla() + plt.plot(self.prices) _plot_position(self._position, self._current_tick) - plt.suptitle( - "Total Reward: %.6f" % self._total_reward + ' ~ ' + - "Total Profit: %.6f" % self._total_profit - ) - + plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) plt.pause(0.01) - def render_all(self, mode='human'): + + def render_all(self): + plt.figure() window_ticks = np.arange(len(self._position_history)) - plt.plot(self.prices) + plt.plot(self.prices['open'], alpha=0.5) short_ticks = [] long_ticks = [] + neutral_ticks = [] for i, tick in enumerate(window_ticks): if self._position_history[i] == Positions.Short: - short_ticks.append(tick) + short_ticks.append(tick - 1) elif self._position_history[i] == Positions.Long: - long_ticks.append(tick) + long_ticks.append(tick - 1) + elif self._position_history[i] == Positions.Neutral: + neutral_ticks.append(tick - 1) - plt.plot(short_ticks, self.prices[short_ticks], 'ro') - plt.plot(long_ticks, self.prices[long_ticks], 'go') + plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + 'o', color='grey', ms=3, alpha=0.1) + plt.plot(short_ticks, self.prices.loc[short_ticks].open, + 'o', color='r', ms=3, alpha=0.8) + plt.plot(long_ticks, self.prices.loc[long_ticks].open, + 'o', color='g', ms=3, alpha=0.8) - plt.suptitle( - "Total Reward: %.6f" % self._total_reward + ' ~ ' + - "Total Profit: %.6f" % self._total_profit - ) + plt.suptitle("Generalising") + fig = plt.gcf() + fig.set_size_inches(15, 10) + + + + + def close_trade_report(self): + small_trade = 0 + positive_big_trade = 0 + negative_big_trade = 0 + small_profit = 0.003 + for i in self.close_trade_profit: + if i < small_profit and i > -small_profit: + small_trade+=1 + elif i > small_profit: + positive_big_trade += 1 + elif i < -small_profit: + negative_big_trade += 1 + print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + + + def report(self): + + # get total trade + long_trade = 0 + short_trade = 0 + neutral_trade = 0 + for trade in self.trade_history: + if trade['type'] == 'long': + long_trade += 1 + + elif trade['type'] == 'short': + short_trade += 1 + else: + neutral_trade += 1 + + negative_trade = 0 + positive_trade = 0 + for tr in self.close_trade_profit: + if tr < 0.: + negative_trade += 1 + + if tr > 0.: + positive_trade += 1 + + total_trade_lr = negative_trade+positive_trade + + + total_trade = long_trade + short_trade + sharp_ratio = self.sharpe_ratio() + sharp_log = self.get_sharpe_ratio() + + from tabulate import tabulate + + headers = ["Performance", ""] + performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + ["Total reward", "{0:.3f}".format(self.total_reward)], + ["Start profit(unit)", "{0:.2f}".format(1.)], + ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + ["Sharp log", "{0:.3f}".format(sharp_log)], + # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + ] + tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + print(tabulation) + + result = { + "Start": "{0:.2f}".format(1.), + "End": "{0:.2f}".format(self._total_profit), + "Sharp": "{0:.3f}".format(sharp_ratio), + "Winrate": 
"{0:.2f}".format(positive_trade*100/total_trade_lr) + } + return result def close(self): plt.close() + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + def save_rendering(self, filepath): plt.savefig(filepath) + def pause_rendering(self): plt.show() + def _calculate_reward(self, action): - step_reward = 0 + # rw = self.transaction_profit_reward(action) + #rw = self.reward_rr_profit_config(action) + rw = self.reward_rr_profit_config_v2(action) + return rw - trade = False - if ((action == Actions.Buy.value and self._position == Positions.Short) or - (action == Actions.Sell.value and self._position == Positions.Long)): - trade = True - - if trade: - current_price = self.prices[self._current_tick] - last_trade_price = self.prices[self._last_trade_tick] - price_diff = current_price - last_trade_price - - if self._position == Positions.Long: - step_reward += price_diff - - return step_reward def _update_profit(self, action): - trade = False - if ((action == Actions.Buy.value and self._position == Positions.Short) or - (action == Actions.Sell.value and self._position == Positions.Long)): - trade = True - - if trade or self._done: - current_price = self.prices[self._current_tick] - last_trade_price = self.prices[self._last_trade_tick] + #if self._is_trade(action) or self._done: + if self._is_trade_v2(action) or self._done: + pnl = self.get_unrealized_profit() if self._position == Positions.Long: - shares = (self._total_profit * (1 - self.fee)) / last_trade_price - self._total_profit = (shares * (1 - self.fee)) * current_price + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) - def max_possible_profit(self): - current_tick = self._start_tick - last_trade_tick = current_tick - 1 - profit = 1. + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) - while current_tick <= self._end_tick: - position = None - if self.prices[current_tick] < self.prices[current_tick - 1]: - while (current_tick <= self._end_tick and - self.prices[current_tick] < self.prices[current_tick - 1]): - current_tick += 1 - position = Positions.Short - else: - while (current_tick <= self._end_tick and - self.prices[current_tick] >= self.prices[current_tick - 1]): - current_tick += 1 - position = Positions.Long - if position == Positions.Long: - current_price = self.prices[current_tick - 1] - last_trade_price = self.prices[last_trade_tick] - shares = profit / last_trade_price - profit = shares * current_price - last_trade_tick = current_tick - 1 - print(profit) + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Short.value or action == Actions.Neutral.value: + if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_sell_fee(current_price) - return profit + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Long.value or action == Actions.Neutral.value: + if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + + def get_trading_log_return(self): + return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick-1].open + + + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + def get_bnh_log_return(self): + return np.diff(np.log(self.prices['open'][self._start_tick:])) + + + def transaction_profit_reward(self, action): + rw = 0. + + pt = self.prev_price() + pt_1 = self.current_price() + + + if self._position == Positions.Long: + a_t = 1 + elif self._position == Positions.Short: + a_t = -1 + else: + a_t = 0 + + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + # close short + elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + pt_1 = self.add_buy_fee(self.current_price()) + po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + else: + rw = a_t*(pt_1 - pt)/pt + + return np.clip(rw, 0, 1) + + + + def reward_rr_profit_config_v2(self, action): + rw = 0. 
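+        # The tiers below key off close_trade_profit[-1], the most recently realized
+        # trade PnL: a close beyond profit_aim * rr earns the largest bonus, a smaller
+        # positive close earns less, and losses are assigned negative values; the final
+        # np.clip(rw, 0, 1) then compresses every branch into the [0, 1] range.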
+ + pt_1 = self.current_price() + + + if len(self.close_trade_profit) > 0: + # long + if self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Short_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Long_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + # short + if self._position == Positions.Short: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Long_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Short_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + return np.clip(rw, 0, 1) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index dded1ac3b..e208707eb 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -1,13 +1,19 @@ import logging -from typing import Any, Tuple, Dict -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from pandas import DataFrame -import pandas as pd -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from typing import Any, Dict, Tuple + import numpy as np import numpy.typing as npt +import pandas as pd +from pandas import DataFrame +from stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +#from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv +from 
freqtrade.persistence import Trade + logger = logging.getLogger(__name__) @@ -69,29 +75,69 @@ class ReinforcementLearningModel(IFreqaiModel): def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): train_df = data_dictionary["train_features"] + # train_labels = data_dictionary["train_labels"] + test_df = data_dictionary["test_features"] + # test_labels = data_dictionary["test_labels"] + + # sep = '/' + # coin = pair.split(sep, 1)[0] + # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + # price.reset_index(inplace=True, drop=True) + # price = price.to_frame() + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - sep = '/' - coin = pair.split(sep, 1)[0] - price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - price.reset_index(inplace=True, drop=True) model_name = 'ppo' - env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + #env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) agent_params = self.freqai_info['model_training_parameters'] - total_timesteps = agent_params.get('total_timesteps', 1000) + reward_params = self.freqai_info['model_reward_parameters'] + env_instance = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) agent = RLPrediction_agent(env_instance) + # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') + # eval_callback = EvalCallback(test_df, best_model_save_path='./models/', + # log_path='./logs/', eval_freq=10000, + # deterministic=True, render=False) + + # #Create the callback list + # callback = CallbackList([checkpoint_callback, eval_callback]) + model = agent.get_model(model_name, model_kwargs=agent_params) trained_model = agent.train_model(model=model, tb_log_name=model_name, - total_timesteps=total_timesteps) + model_kwargs=agent_params) + #eval_callback=callback) + + print('Training finished!') return trained_model + def get_state_info(self, pair): + open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) + market_side = 0.5 + current_profit = 0 + for trade in open_trades: + if trade.pair == pair: + current_value = trade.open_trade_value + openrate = trade.open_rate + if 'long' in trade.enter_tag: + market_side = 1 + else: + market_side = 0 + current_profit = current_value / openrate -1 + + total_profit = 0 + closed_trades = Trade.get_trades(trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) + for trade in closed_trades: + total_profit += trade.close_profit + + return market_side, current_profit, total_profit + + def predict( self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py b/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py deleted file mode 100644 index dded1ac3b..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py +++ /dev/null @@ -1,157 +0,0 @@ -import logging -from typing import Any, Tuple, Dict -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from pandas import DataFrame -import pandas as pd -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import numpy as np -import numpy.typing as npt -from freqtrade.freqai.freqai_interface import IFreqaiModel - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningModel(IFreqaiModel): - 
""" - User created Reinforcement Learning Model prediction model. - """ - - def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen - ) -> Any: - """ - Filter the training data and train a model to it. Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. - :param unfiltered_dataframe: Full dataframe for the current training period - :param metadata: pair metadata from strategy. - :returns: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info("--------------------Starting training " f"{pair} --------------------") - - # filter the features requested by user in the configuration file and elegantly handle NaNs - features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( - features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods - - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" - ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - - model = self.fit(data_dictionary, pair) - - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - - logger.info(f"--------------------done training {pair}--------------------") - - return model - - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - - train_df = data_dictionary["train_features"] - - sep = '/' - coin = pair.split(sep, 1)[0] - price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - price.reset_index(inplace=True, drop=True) - - model_name = 'ppo' - - env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) - - agent_params = self.freqai_info['model_training_parameters'] - total_timesteps = agent_params.get('total_timesteps', 1000) - - agent = RLPrediction_agent(env_instance) - - model = agent.get_model(model_name, model_kwargs=agent_params) - trained_model = agent.train_model(model=model, - tb_log_name=model_name, - total_timesteps=total_timesteps) - print('Training finished!') - - return trained_model - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False - ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :pred_df: dataframe containing the predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) - pred_df.fillna(0, inplace=True) - - return (pred_df, dk.do_predict) - - def rl_model_predict(self, dataframe: DataFrame, - dk: FreqaiDataKitchen, model: Any) -> DataFrame: - - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) - - def _predict(window): - observations = dataframe.iloc[window.index] - res, _ = model.predict(observations, deterministic=True) - return res - - output = output.rolling(window=self.CONV_WIDTH).apply(_predict) - - return output - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 From 01232e9a1f8e28e3611e38af3816edb026600767 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Sat, 13 Aug 2022 18:48:58 +0300 Subject: [PATCH 006/421] callback function and TDQN model added --- freqtrade/freqai/data_drawer.py | 4 +- .../RL/RLPrediction_agent_v2.py | 225 ++++++ .../RL/RLPrediction_env_v2.py | 645 ++++++++++++++++++ .../prediction_models/RLPredictionModel.py | 253 +++++++ 4 files changed, 1126 insertions(+), 1 deletion(-) create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py create mode 100644 freqtrade/freqai/prediction_models/RLPredictionModel.py diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 5282b4f59..f9d56c4b4 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -473,7 +473,9 @@ class FreqaiDataDrawer: model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") elif model_type == 'stable_baselines': from stable_baselines3.ppo.ppo import PPO - model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + from stable_baselines3 import DQN + #model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + model = DQN.load(dk.data_path / f"best_model.zip") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py new file mode 100644 index 000000000..e6a931e43 --- 
/dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py @@ -0,0 +1,225 @@ +import torch as th +from torch import nn +from typing import Dict, List, Tuple, Type, Optional, Any, Union +import gym +from stable_baselines3.common.type_aliases import GymEnv, Schedule +from stable_baselines3.common.torch_layers import ( + BaseFeaturesExtractor, + FlattenExtractor, + CombinedExtractor +) +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3 import DQN + + +from stable_baselines3.common.policies import BasePolicy +#from stable_baselines3.common.policies import register_policy +from stable_baselines3.dqn.policies import ( + QNetwork, DQNPolicy, MultiInputPolicy, + CnnPolicy, DQNPolicy, MlpPolicy) +import torch + + +def create_mlp_( + input_dim: int, + output_dim: int, + net_arch: List[int], + activation_fn: Type[nn.Module] = nn.ReLU, + squash_output: bool = False, +) -> List[nn.Module]: + dropout = 0.2 + if len(net_arch) > 0: + number_of_neural = net_arch[0] + + modules = [ + nn.Linear(input_dim, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, output_dim) + ] + return modules + +class TDQNetwork(QNetwork): + def __init__(self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + features_extractor: nn.Module, + features_dim: int, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + normalize_images: bool = True + ): + super().__init__( + observation_space=observation_space, + action_space=action_space, + features_extractor=features_extractor, + features_dim=features_dim, + net_arch=net_arch, + activation_fn=activation_fn, + normalize_images=normalize_images + ) + action_dim = self.action_space.n + q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) + self.q_net = nn.Sequential(*q_net).apply(self.init_weights) + + def init_weights(self, m): + if type(m) == nn.Linear: + torch.nn.init.kaiming_uniform_(m.weight) + + +class TDQNPolicy(DQNPolicy): + + def __init__( + self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + lr_schedule: Schedule, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + ): + super().__init__( + observation_space=observation_space, + action_space=action_space, + lr_schedule=lr_schedule, + net_arch=net_arch, + activation_fn=activation_fn, + features_extractor_class=features_extractor_class, + features_extractor_kwargs=features_extractor_kwargs, + normalize_images=normalize_images, + optimizer_class=optimizer_class, + optimizer_kwargs=optimizer_kwargs + ) + + @staticmethod + def init_weights(module: nn.Module, gain: float = 1) -> None: + """ + Orthogonal initialization (used in PPO and A2C) + """ + if isinstance(module, (nn.Linear, nn.Conv2d)): + 
nn.init.kaiming_uniform_(module.weight) + if module.bias is not None: + module.bias.data.fill_(0.0) + + def make_q_net(self) -> TDQNetwork: + # Make sure we always have separate networks for features extractors etc + net_args = self._update_features_extractor(self.net_args, features_extractor=None) + return TDQNetwork(**net_args).to(self.device) + + +class TMultiInputPolicy(TDQNPolicy): + def __init__( + self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + lr_schedule: Schedule, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + ): + super().__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + ) + + +class TDQN(DQN): + + policy_aliases: Dict[str, Type[BasePolicy]] = { + "MlpPolicy": MlpPolicy, + "CnnPolicy": CnnPolicy, + "TMultiInputPolicy": TMultiInputPolicy, + } + + def __init__( + self, + policy: Union[str, Type[TDQNPolicy]], + env: Union[GymEnv, str], + learning_rate: Union[float, Schedule] = 1e-4, + buffer_size: int = 1000000, # 1e6 + learning_starts: int = 50000, + batch_size: int = 32, + tau: float = 1.0, + gamma: float = 0.99, + train_freq: Union[int, Tuple[int, str]] = 4, + gradient_steps: int = 1, + replay_buffer_class: Optional[ReplayBuffer] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, + optimize_memory_usage: bool = False, + target_update_interval: int = 10000, + exploration_fraction: float = 0.1, + exploration_initial_eps: float = 1.0, + exploration_final_eps: float = 0.05, + max_grad_norm: float = 10, + tensorboard_log: Optional[str] = None, + create_eval_env: bool = False, + policy_kwargs: Optional[Dict[str, Any]] = None, + verbose: int = 1, + seed: Optional[int] = None, + device: Union[th.device, str] = "auto", + _init_setup_model: bool = True, + ): + + super().__init__( + policy=policy, + env=env, + learning_rate=learning_rate, + buffer_size=buffer_size, + learning_starts=learning_starts, + batch_size=batch_size, + tau=tau, + gamma=gamma, + train_freq=train_freq, + gradient_steps=gradient_steps, + replay_buffer_class=replay_buffer_class, # No action noise + replay_buffer_kwargs=replay_buffer_kwargs, + optimize_memory_usage=optimize_memory_usage, + target_update_interval=target_update_interval, + exploration_fraction=exploration_fraction, + exploration_initial_eps=exploration_initial_eps, + exploration_final_eps=exploration_final_eps, + max_grad_norm=max_grad_norm, + tensorboard_log=tensorboard_log, + create_eval_env=create_eval_env, + policy_kwargs=policy_kwargs, + verbose=verbose, + seed=seed, + device=device, + _init_setup_model=_init_setup_model + ) + + + +# try: +# register_policy("TMultiInputPolicy", TMultiInputPolicy) +# except: +# print("already registered") \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py new file mode 100644 index 000000000..ac91cd200 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py @@ -0,0 +1,645 @@ +import gym +from gym import spaces +from gym.utils import seeding +from enum import Enum 
+from sklearn.decomposition import PCA, KernelPCA +import random +import numpy as np +import pandas as pd +from collections import deque +import matplotlib.pylab as plt +from typing import Dict, List, Tuple, Type, Optional, Any, Union, Callable +import logging + +logger = logging.getLogger(__name__) + +# from bokeh.io import output_notebook +# from bokeh.plotting import figure, show +# from bokeh.models import ( +# CustomJS, +# ColumnDataSource, +# NumeralTickFormatter, +# Span, +# HoverTool, +# Range1d, +# DatetimeTickFormatter, +# Scatter, +# Label, LabelSet +# ) + +class Actions(Enum): + Short = 0 + Long = 1 + Neutral = 2 + +class Actions_v2(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + +class DEnv(gym.Env): + + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee=0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions_v2)) + self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + + def reset(self): + + self._done = False + + if self.starting_point == True: + self._position_history = (self._start_tick* [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + #self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. 
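+        # portfolio_log_returns spans the full price history and is filled one tick at
+        # a time by update_portfolio_log_returns() during step().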
+ + self.returns_report = [] + + return self._get_observation() + + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + + + + + trade_type = None + if self.is_tradesignal_v2(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + + temp_position = self._position + if action == Actions_v2.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Long_buy.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions_v2.Short_buy.value: + self._position = Positions.Short + trade_type = "short" + elif action == Actions_v2.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type != None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick = self._current_tick, + total_reward = self.total_reward, + total_profit = self._total_profit, + position = self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + + def processState(self, state): + return state.to_numpy() + + def convert_mlp_Policy(self, obs_): + pass + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + + def get_unrealized_profit(self): + + if self._last_trade_tick == None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price)/last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price)/last_trade_price + else: + return 0. 
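
The step() logic above, filtered through is_tradesignal_v2() just below, amounts to a small transition table: from a flat position the buy actions open a long or short (the sell actions simply re-register Neutral), while an open position is only closed by its matching sell action or by Neutral; every other action/position pair is ignored. A hand-derived, illustrative summary of that reading (not part of the patch itself):

# action -> next position, keyed by the current position (hand-derived from step()).
# Pairs not listed are rejected by is_tradesignal_v2() and leave the position unchanged.
TRANSITIONS = {
    "Neutral": {"Long_buy": "Long", "Short_buy": "Short",
                "Long_sell": "Neutral", "Short_sell": "Neutral"},
    "Long": {"Neutral": "Neutral", "Long_sell": "Neutral"},
    "Short": {"Neutral": "Neutral", "Short_sell": "Neutral"},
}

def next_position(position: str, action: str) -> str:
    return TRANSITIONS.get(position, {}).get(action, position)

print(next_position("Neutral", "Long_buy"))   # Long
print(next_position("Long", "Short_buy"))     # Long (rejected, so the position is unchanged)
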
+ + + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_tradesignal_v2(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or + + (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or + (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) + + + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def _is_trade_v2(self, action: Actions_v2): + return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Short) or + + (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) + ) + + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_hold_v2(self, action): + return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) + or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) + + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + + def render(self, mode='human'): + + def _plot_position(position, tick): + color = None + if position == Positions.Short: + color = 'red' + elif position == Positions.Long: + color = 'green' + if color: + plt.scatter(tick, self.prices.loc[tick].open, color=color) + + if self._first_rendering: + self._first_rendering = False + plt.cla() + plt.plot(self.prices) + start_position = self._position_history[self._start_tick] + _plot_position(start_position, self._start_tick) + + plt.cla() + 
plt.plot(self.prices) + _plot_position(self._position, self._current_tick) + + plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) + plt.pause(0.01) + + + def render_all(self): + plt.figure() + window_ticks = np.arange(len(self._position_history)) + plt.plot(self.prices['open'], alpha=0.5) + + short_ticks = [] + long_ticks = [] + neutral_ticks = [] + for i, tick in enumerate(window_ticks): + if self._position_history[i] == Positions.Short: + short_ticks.append(tick - 1) + elif self._position_history[i] == Positions.Long: + long_ticks.append(tick - 1) + elif self._position_history[i] == Positions.Neutral: + neutral_ticks.append(tick - 1) + + plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + 'o', color='grey', ms=3, alpha=0.1) + plt.plot(short_ticks, self.prices.loc[short_ticks].open, + 'o', color='r', ms=3, alpha=0.8) + plt.plot(long_ticks, self.prices.loc[long_ticks].open, + 'o', color='g', ms=3, alpha=0.8) + + plt.suptitle("Generalising") + fig = plt.gcf() + fig.set_size_inches(15, 10) + + + + + def close_trade_report(self): + small_trade = 0 + positive_big_trade = 0 + negative_big_trade = 0 + small_profit = 0.003 + for i in self.close_trade_profit: + if i < small_profit and i > -small_profit: + small_trade+=1 + elif i > small_profit: + positive_big_trade += 1 + elif i < -small_profit: + negative_big_trade += 1 + print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + + + def report(self): + + # get total trade + long_trade = 0 + short_trade = 0 + neutral_trade = 0 + for trade in self.trade_history: + if trade['type'] == 'long': + long_trade += 1 + + elif trade['type'] == 'short': + short_trade += 1 + else: + neutral_trade += 1 + + negative_trade = 0 + positive_trade = 0 + for tr in self.close_trade_profit: + if tr < 0.: + negative_trade += 1 + + if tr > 0.: + positive_trade += 1 + + total_trade_lr = negative_trade+positive_trade + + + total_trade = long_trade + short_trade + sharp_ratio = self.sharpe_ratio() + sharp_log = self.get_sharpe_ratio() + + from tabulate import tabulate + + headers = ["Performance", ""] + performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + ["Total reward", "{0:.3f}".format(self.total_reward)], + ["Start profit(unit)", "{0:.2f}".format(1.)], + ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + ["Sharp log", "{0:.3f}".format(sharp_log)], + # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + ] + tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + print(tabulation) + + result = { + "Start": "{0:.2f}".format(1.), + "End": "{0:.2f}".format(self._total_profit), + "Sharp": "{0:.3f}".format(sharp_ratio), + "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) + } + return result + + def close(self): + plt.close() + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + + def save_rendering(self, filepath): + plt.savefig(filepath) + + + def pause_rendering(self): + plt.show() + + + def _calculate_reward(self, action): + # rw = self.transaction_profit_reward(action) + #rw = self.reward_rr_profit_config(action) + rw = self.reward_rr_profit_config_v2(action) + return rw + + + def _update_profit(self, action): + #if 
self._is_trade(action) or self._done: + if self._is_trade_v2(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. + """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Short.value or action == Actions.Neutral.value: + if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Long.value or action == Actions.Neutral.value: + if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + + def get_trading_log_return(self): + return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick-1].open + + + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + def get_bnh_log_return(self): + return np.diff(np.log(self.prices['open'][self._start_tick:])) + + + def transaction_profit_reward(self, action): + rw = 0. 
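+        # a_t below encodes the held position sign (+1 long, -1 short, 0 neutral); the
+        # reward is that sign times the fractional price change, measured since the
+        # previous tick while holding and since entry (with fees folded in) on a close.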
+ + pt = self.prev_price() + pt_1 = self.current_price() + + + if self._position == Positions.Long: + a_t = 1 + elif self._position == Positions.Short: + a_t = -1 + else: + a_t = 0 + + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + # close short + elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + pt_1 = self.add_buy_fee(self.current_price()) + po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + else: + rw = a_t*(pt_1 - pt)/pt + + return np.clip(rw, 0, 1) + + + + def reward_rr_profit_config_v2(self, action): + rw = 0. + + pt_1 = self.current_price() + + + if len(self.close_trade_profit) > 0: + # long + if self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Short_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Long_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + # short + if self._position == Positions.Short: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Long_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Short_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + return np.clip(rw, 0, 1) \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RLPredictionModel.py b/freqtrade/freqai/prediction_models/RLPredictionModel.py new file mode 100644 index 000000000..b6903dd43 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RLPredictionModel.py @@ -0,0 +1,253 @@ +import logging +from typing import Any, Dict, Tuple +#from matplotlib.colors import 
DivergingNorm + +from pandas import DataFrame +import pandas as pd +from freqtrade.exceptions import OperationalException +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import tensorflow as tf +from freqtrade.freqai.prediction_models.BaseTensorFlowModel import BaseTensorFlowModel +from freqtrade.freqai.freqai_interface import IFreqaiModel +from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout +from tensorflow.keras.models import Model +import numpy as np +import copy + +from keras.layers import * +import random + + +logger = logging.getLogger(__name__) + +# tf.config.run_functions_eagerly(True) +# tf.data.experimental.enable_debug_mode() + +import os +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + +MAX_EPOCHS = 10 +LOOKBACK = 8 + + +class RLPredictionModel_v2(IFreqaiModel): + """ + User created prediction model. The class needs to override three necessary + functions, predict(), fit(). + """ + + def fit(self, data_dictionary: Dict, pair) -> Any: + """ + User sets up the training and test data to fit their desired model here + :params: + :data_dictionary: the dictionary constructed by DataHandler to hold + all the training and test data/labels. + """ + + train_df = data_dictionary["train_features"] + train_labels = data_dictionary["train_labels"] + test_df = data_dictionary["test_features"] + test_labels = data_dictionary["test_labels"] + n_labels = len(train_labels.columns) + if n_labels > 1: + raise OperationalException( + "Neural Net not yet configured for multi-targets. Please " + " reduce number of targets to 1 in strategy." + ) + + n_features = len(data_dictionary["train_features"].columns) + BATCH_SIZE = self.freqai_info.get("batch_size", 64) + input_dims = [BATCH_SIZE, self.CONV_WIDTH, n_features] + + + w1 = WindowGenerator( + input_width=self.CONV_WIDTH, + label_width=1, + shift=1, + train_df=train_df, + val_df=test_df, + train_labels=train_labels, + val_labels=test_labels, + batch_size=BATCH_SIZE, + ) + + + # train_agent() + #pair = self.dd.historical_data[pair] + #gym_env = FreqtradeEnv(data=train_df, prices=0.01, windows_size=100, pair=pair, stake_amount=100) + + # sep = '/' + # coin = pair.split(sep, 1)[0] + + # # df1 = train_df.filter(regex='price') + # # df2 = df1.filter(regex='raw') + + # # df3 = df2.filter(regex=f"{coin}") + # # print(df3) + + # price = train_df[f"%-{coin}raw_price_5m"] + # gym_env = RLPrediction_GymAnytrading(signal_features=train_df, prices=price, window_size=100) + # sac = RLPrediction_Agent(gym_env) + + # print(sac) + + # return 0 + + + + return model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first=True + ) -> Tuple[DataFrame, DataFrame]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :predictions: np.array of predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + if first: + full_df = dk.data_dictionary["prediction_features"] + + w1 = WindowGenerator( + input_width=self.CONV_WIDTH, + label_width=1, + shift=1, + test_df=full_df, + batch_size=len(full_df), + ) + + predictions = self.model.predict(w1.inference) + len_diff = len(dk.do_predict) - len(predictions) + if len_diff > 0: + dk.do_predict = dk.do_predict[len_diff:] + + else: + data = dk.data_dictionary["prediction_features"] + data = tf.expand_dims(data, axis=0) + predictions = self.model(data, training=False) + + predictions = predictions[:, 0] + pred_df = DataFrame(predictions, columns=dk.label_list) + + pred_df = dk.denormalize_labels_from_metadata(pred_df) + + return (pred_df, np.ones(len(pred_df))) + + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pass + # w1 = WindowGenerator( + # input_width=self.CONV_WIDTH, label_width=1, shift=1, test_df=df, batch_size=len(df) + # ) + + # trained_predictions = model.predict(w1.inference) + # #trained_predictions = trained_predictions[:, 0, 0] + # trained_predictions = trained_predictions[:, 0] + + # n_lost_points = len(df) - len(trained_predictions) + # pred_df = DataFrame(trained_predictions, columns=dk.label_list) + # zeros_df = DataFrame(np.zeros((n_lost_points, len(dk.label_list))), columns=dk.label_list) + # pred_df = pd.concat([zeros_df, pred_df], axis=0) + + # pred_df = dk.denormalize_labels_from_metadata(pred_df) + + + + # self.dd.historic_predictions[pair] = DataFrame() + # self.dd.historic_predictions[pair] = copy.deepcopy(pred_df) + + +class WindowGenerator: + def __init__( + self, + input_width, + label_width, + shift, + train_df=None, + val_df=None, + test_df=None, + train_labels=None, + val_labels=None, + test_labels=None, + batch_size=None, + ): + # Store the raw data. 
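+        # input_width rows form one input window, shift is how far ahead the label sits,
+        # and total_window_size = input_width + shift is derived from them below.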
+ self.train_df = train_df + self.val_df = val_df + self.test_df = test_df + self.train_labels = train_labels + self.val_labels = val_labels + self.test_labels = test_labels + self.batch_size = batch_size + self.input_width = input_width + self.label_width = label_width + self.shift = shift + + self.total_window_size = input_width + shift + + self.input_slice = slice(0, input_width) + self.input_indices = np.arange(self.total_window_size)[self.input_slice] + + def make_dataset(self, data, labels=None): + data = np.array(data, dtype=np.float32) + if labels is not None: + labels = np.array(labels, dtype=np.float32) + ds = tf.keras.preprocessing.timeseries_dataset_from_array( + data=data, + targets=labels, + sequence_length=self.total_window_size, + sequence_stride=1, + sampling_rate=1, + shuffle=False, + batch_size=self.batch_size, + ) + + return ds + + @property + def train(self): + + + + return self.make_dataset(self.train_df, self.train_labels) + + @property + def val(self): + return self.make_dataset(self.val_df, self.val_labels) + + @property + def test(self): + return self.make_dataset(self.test_df, self.test_labels) + + @property + def inference(self): + return self.make_dataset(self.test_df) + + @property + def example(self): + """Get and cache an example batch of `inputs, labels` for plotting.""" + result = getattr(self, "_example", None) + if result is None: + # No example batch was found, so get one from the `.train` dataset + result = next(iter(self.train)) + # And cache it for next time + self._example = result + return result \ No newline at end of file From cd3fe44424357d80e1e944131298a332c6afb30d Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Sat, 13 Aug 2022 20:05:21 +0300 Subject: [PATCH 007/421] callback function and TDQN model added --- .../ReinforcementLearning.py | 143 ++++++++++++++---- 1 file changed, 116 insertions(+), 27 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index e208707eb..5783baba8 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -5,14 +5,23 @@ import numpy as np import numpy.typing as npt import pandas as pd from pandas import DataFrame -from stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN #from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv from freqtrade.persistence import Trade +from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.monitor import Monitor + +import torch as th +from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, StopTrainingOnRewardThreshold +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3 import PPO + + logger = logging.getLogger(__name__) @@ -74,47 +83,127 @@ class ReinforcementLearningModel(IFreqaiModel): def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - train_df = data_dictionary["train_features"] - # train_labels = data_dictionary["train_labels"] - test_df = data_dictionary["test_features"] - 
# test_labels = data_dictionary["test_labels"] + # train_df = data_dictionary["train_features"] + # # train_labels = data_dictionary["train_labels"] + # test_df = data_dictionary["test_features"] + # # test_labels = data_dictionary["test_labels"] - # sep = '/' - # coin = pair.split(sep, 1)[0] - # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - # price.reset_index(inplace=True, drop=True) - # price = price.to_frame() - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + # # sep = '/' + # # coin = pair.split(sep, 1)[0] + # # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + # # price.reset_index(inplace=True, drop=True) + # # price = price.to_frame() + # price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) + + # #train_env = GymAnytrading(train_df, price, self.CONV_WIDTH) + + # agent_params = self.freqai_info['model_training_parameters'] + # reward_params = self.freqai_info['model_reward_parameters'] - model_name = 'ppo' + # train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + # #eval_env = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + + # #env_instance = SubprocVecEnv([DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params)]) + # #train_env.reset() + # #eval_env.reset() + + # # model + + # #policy_kwargs = dict(net_arch=[512, 512, 512]) + # policy_kwargs = dict(activation_fn=th.nn.Tanh, + # net_arch=[256, 256, 256]) + # agent = RLPrediction_agent(train_env) + # #eval_agent = RLPrediction_agent(eval_env) - #env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + # # PPO + # model_name = 'ppo' + # model = agent.get_model(model_name, model_kwargs=agent_params, policy_kwargs=policy_kwargs) + # trained_model = agent.train_model(model=model, + # tb_log_name=model_name, + # model_kwargs=agent_params, + # train_df=train_df, + # test_df=test_df, + # price=price, + # price_test=price_test, + # window_size=self.CONV_WIDTH) + + + # # best_model = eval_agent.train_model(model=model, + # # tb_log_name=model_name, + # # model_kwargs=agent_params, + # # eval=eval_env) + + + # # TDQN + # # model_name = 'TDQN' + # # model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log='./tensorboard_log/', + # # learning_rate=agent_params["learning_rate"], gamma=0.9, + # # target_update_interval=5000, buffer_size=50000, + # # exploration_initial_eps=1, exploration_final_eps=0.1, + # # replay_buffer_class=ReplayBuffer + # # ) + + # # trained_model = agent.train_model(model=model, + # # tb_log_name=model_name, + # # model_kwargs=agent_params) + # #model.learn( + # # total_timesteps=5000, + # # callback=callback + # # ) agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] - env_instance = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - agent = RLPrediction_agent(env_instance) + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) + + # environments 
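+        # two DEnv instances are built: one over the training features for learning and
+        # one over the test features, wrapped in a stable-baselines3 Monitor so that the
+        # EvalCallback below can log episode rewards and checkpoint the best model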
+ train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval_env = Monitor(eval, ".") + eval_env.reset() - # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') - # eval_callback = EvalCallback(test_df, best_model_save_path='./models/', - # log_path='./logs/', eval_freq=10000, - # deterministic=True, render=False) + # this should be in config - TODO + agent_type = 'tdqn' - # #Create the callback list - # callback = CallbackList([checkpoint_callback, eval_callback]) + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/{agent_type}/logs/", eval_freq=10000, + deterministic=True, render=False) - model = agent.get_model(model_name, model_kwargs=agent_params) - trained_model = agent.train_model(model=model, - tb_log_name=model_name, - model_kwargs=agent_params) - #eval_callback=callback) + # model arch + policy_kwargs = dict(activation_fn=th.nn.Tanh, + net_arch=[512, 512, 512]) + + + if agent_type == 'tdqn': + model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", + learning_rate=0.00025, gamma=0.9, + target_update_interval=5000, buffer_size=50000, + exploration_initial_eps=1, exploration_final_eps=0.1, + replay_buffer_class=ReplayBuffer + ) + elif agent_type == 'ppo': + model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", + learning_rate=0.00025, gamma=0.9 + ) + + model.learn( + total_timesteps=agent_params["total_timesteps"], + callback=eval_callback + ) print('Training finished!') - return trained_model + return model + + def get_state_info(self, pair): open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) From 9b895500b306cf04c608931ae22e244901ba4fdf Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Sun, 14 Aug 2022 16:24:20 +0300 Subject: [PATCH 008/421] initial commit - new dev branch --- .../RL/RLPrediction_agent.py | 30 +- ...agent_v2.py => RLPrediction_agent_TDQN.py} | 59 +- .../RL/RLPrediction_env_TDQN_3ac.py | 513 ++++++++++++++ ...on_env.py => RLPrediction_env_TDQN_5ac.py} | 416 +++++------ .../RL/RLPrediction_env_v2.py | 645 ------------------ .../ReinforcementLearning.py | 67 +- 6 files changed, 810 insertions(+), 920 deletions(-) rename freqtrade/freqai/prediction_models/RL/{RLPrediction_agent_v2.py => RLPrediction_agent_TDQN.py} (93%) create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py rename freqtrade/freqai/prediction_models/RL/{RLPrediction_env.py => RLPrediction_env_TDQN_5ac.py} (55%) delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py index 2e271bd02..26b31f6e9 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py @@ -1,11 +1,15 @@ # common library +import gym import numpy as np from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3 -from stable_baselines3.common.callbacks import BaseCallback, EvalCallback +from stable_baselines3.common.callbacks import (BaseCallback, CallbackList, CheckpointCallback, + EvalCallback, StopTrainingOnRewardThreshold) from stable_baselines3.common.noise import 
NormalActionNoise, OrnsteinUhlenbeckActionNoise from freqtrade.freqai.prediction_models.RL import config +#from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv # from stable_baselines3.common.vec_env import DummyVecEnv @@ -106,12 +110,30 @@ class RLPrediction_agent: return model - def train_model(self, model, tb_log_name, model_kwargs): + def train_model(self, model, tb_log_name, model_kwargs, train_df, test_df, price, price_test, window_size): + + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_env = DEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params) + eval_env = DEnv(df=test_df, prices=price_test, window_size=window_size, reward_kwargs=reward_params) + + # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/', + # name_prefix='rl_model') + + checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') + + eval_callback = EvalCallback(eval_env, best_model_save_path='./logs/best_model', log_path='./logs/results', eval_freq=500) + #callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=1) + + # Create the callback list + callback = CallbackList([checkpoint_callback, eval_callback]) + model = model.learn( total_timesteps=model_kwargs["total_timesteps"], tb_log_name=tb_log_name, - #callback=eval_callback, - callback=TensorboardCallback(), + callback=callback, + #callback=TensorboardCallback(), ) return model diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py similarity index 93% rename from freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py rename to freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py index e6a931e43..0aa3512a1 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py @@ -1,23 +1,18 @@ -import torch as th -from torch import nn -from typing import Dict, List, Tuple, Type, Optional, Any, Union +from typing import Any, Dict, List, Optional, Tuple, Type, Union + import gym -from stable_baselines3.common.type_aliases import GymEnv, Schedule -from stable_baselines3.common.torch_layers import ( - BaseFeaturesExtractor, - FlattenExtractor, - CombinedExtractor -) -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3 import DQN - - -from stable_baselines3.common.policies import BasePolicy -#from stable_baselines3.common.policies import register_policy -from stable_baselines3.dqn.policies import ( - QNetwork, DQNPolicy, MultiInputPolicy, - CnnPolicy, DQNPolicy, MlpPolicy) import torch +import torch as th +from stable_baselines3 import DQN +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3.common.policies import BasePolicy +from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, CombinedExtractor, + FlattenExtractor) +from stable_baselines3.common.type_aliases import GymEnv, Schedule +#from stable_baselines3.common.policies import register_policy +from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy, + QNetwork) +from torch import nn def create_mlp_( @@ -30,7 +25,7 @@ def create_mlp_( dropout = 0.2 if len(net_arch) > 0: number_of_neural = net_arch[0] - + modules = [ nn.Linear(input_dim, 
number_of_neural), nn.BatchNorm1d(number_of_neural), @@ -69,19 +64,19 @@ class TDQNetwork(QNetwork): features_dim=features_dim, net_arch=net_arch, activation_fn=activation_fn, - normalize_images=normalize_images + normalize_images=normalize_images ) action_dim = self.action_space.n q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) self.q_net = nn.Sequential(*q_net).apply(self.init_weights) - + def init_weights(self, m): if type(m) == nn.Linear: torch.nn.init.kaiming_uniform_(m.weight) - - + + class TDQNPolicy(DQNPolicy): - + def __init__( self, observation_space: gym.spaces.Space, @@ -107,7 +102,7 @@ class TDQNPolicy(DQNPolicy): optimizer_class=optimizer_class, optimizer_kwargs=optimizer_kwargs ) - + @staticmethod def init_weights(module: nn.Module, gain: float = 1) -> None: """ @@ -117,13 +112,13 @@ class TDQNPolicy(DQNPolicy): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: module.bias.data.fill_(0.0) - + def make_q_net(self) -> TDQNetwork: # Make sure we always have separate networks for features extractors etc net_args = self._update_features_extractor(self.net_args, features_extractor=None) return TDQNetwork(**net_args).to(self.device) - + class TMultiInputPolicy(TDQNPolicy): def __init__( self, @@ -150,8 +145,8 @@ class TMultiInputPolicy(TDQNPolicy): optimizer_class, optimizer_kwargs, ) - - + + class TDQN(DQN): policy_aliases: Dict[str, Type[BasePolicy]] = { @@ -216,10 +211,10 @@ class TDQN(DQN): device=device, _init_setup_model=_init_setup_model ) - - + + # try: # register_policy("TMultiInputPolicy", TMultiInputPolicy) # except: -# print("already registered") \ No newline at end of file +# print("already registered") diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py new file mode 100644 index 000000000..184ec57ec --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py @@ -0,0 +1,513 @@ +import logging +import random +from collections import deque +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + +import gym +import matplotlib.pylab as plt +import numpy as np +import pandas as pd +from gym import spaces +from gym.utils import seeding + +logger = logging.getLogger(__name__) + +class Actions(Enum): + Short = 0 + Long = 1 + Neutral = 2 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + +class DEnv(gym.Env): + + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee=0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = 
Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point == True: + self._position_history = (self._start_tick* [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + #self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + temp_position = self._position + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type != None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick = self._current_tick, + total_reward = self.total_reward, + total_profit = self._total_profit, + position = self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + # def processState(self, state): + # return state.to_numpy() + + # def convert_mlp_Policy(self, obs_): + # pass + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick == None: + return 0. + + if self._position == Positions.Neutral: + return 0. 
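+        # the entry and exit fee are both included: e.g. with fee=0.0015, a long
+        # entered at an open of 100 and marked at 101 yields
+        # (101 / 1.0015 - 100 * 1.0015) / (100 * 1.0015) ~ 0.0070 unrealized profit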
+ elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price)/last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price)/last_trade_price + else: + return 0. + + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + + # def render(self, mode='human'): + # def _plot_position(position, tick): + # color = None + # if position == Positions.Short: + # color = 'red' + # elif position == Positions.Long: + # color = 'green' + # if color: + # plt.scatter(tick, self.prices.loc[tick].open, color=color) + # if self._first_rendering: + # self._first_rendering = False + # plt.cla() + # plt.plot(self.prices) + # start_position = self._position_history[self._start_tick] + # _plot_position(start_position, self._start_tick) + # plt.cla() + # plt.plot(self.prices) + # _plot_position(self._position, self._current_tick) + # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) + # plt.pause(0.01) + + # def render_all(self): + # plt.figure() + # window_ticks = np.arange(len(self._position_history)) + # plt.plot(self.prices['open'], alpha=0.5) + # short_ticks = [] + # long_ticks = [] + # neutral_ticks = [] + # for i, tick in enumerate(window_ticks): + # if self._position_history[i] == Positions.Short: + # short_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Long: + # long_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Neutral: + # neutral_ticks.append(tick - 1) + # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + # 'o', color='grey', ms=3, alpha=0.1) + # plt.plot(short_ticks, self.prices.loc[short_ticks].open, + # 'o', color='r', ms=3, alpha=0.8) + # plt.plot(long_ticks, self.prices.loc[long_ticks].open, + # 'o', color='g', ms=3, alpha=0.8) + # plt.suptitle("Generalising") + # fig = plt.gcf() + # fig.set_size_inches(15, 10) + + # def close_trade_report(self): + # small_trade 
= 0 + # positive_big_trade = 0 + # negative_big_trade = 0 + # small_profit = 0.003 + # for i in self.close_trade_profit: + # if i < small_profit and i > -small_profit: + # small_trade+=1 + # elif i > small_profit: + # positive_big_trade += 1 + # elif i < -small_profit: + # negative_big_trade += 1 + # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + + # def report(self): + # # get total trade + # long_trade = 0 + # short_trade = 0 + # neutral_trade = 0 + # for trade in self.trade_history: + # if trade['type'] == 'long': + # long_trade += 1 + # elif trade['type'] == 'short': + # short_trade += 1 + # else: + # neutral_trade += 1 + # negative_trade = 0 + # positive_trade = 0 + # for tr in self.close_trade_profit: + # if tr < 0.: + # negative_trade += 1 + # if tr > 0.: + # positive_trade += 1 + # total_trade_lr = negative_trade+positive_trade + # total_trade = long_trade + short_trade + # sharp_ratio = self.sharpe_ratio() + # sharp_log = self.get_sharpe_ratio() + # from tabulate import tabulate + # headers = ["Performance", ""] + # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + # ["Total reward", "{0:.3f}".format(self.total_reward)], + # ["Start profit(unit)", "{0:.2f}".format(1.)], + # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + # ["Sharp log", "{0:.3f}".format(sharp_log)], + # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + # ] + # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + # print(tabulation) + # result = { + # "Start": "{0:.2f}".format(1.), + # "End": "{0:.2f}".format(self._total_profit), + # "Sharp": "{0:.3f}".format(sharp_ratio), + # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) + # } + # return result + + # def close(self): + # plt.close() + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + # def save_rendering(self, filepath): + # plt.savefig(filepath) + + # def pause_rendering(self): + # plt.show() + + def _calculate_reward(self, action): + # rw = self.transaction_profit_reward(action) + #rw = self.reward_rr_profit_config(action) + rw = self.profit_only_when_close_reward(action) + #rw = self.profit_only_when_close_reward_aim(action) + return rw + + def _update_profit(self, action): + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
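+        The value returned is the one-candle log return of the open price, with the
+        fee folded into whichever side of the position is being entered or exited.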
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Short.value or action == Actions.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Long.value or action == Actions.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + # def get_trading_log_return(self): + # return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick-1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + # def get_bnh_log_return(self): + # return np.diff(np.log(self.prices['open'][self._start_tick:])) + + def transaction_profit_reward(self, action): + rw = 0. + + pt = self.prev_price() + pt_1 = self.current_price() + + + if self._position == Positions.Long: + a_t = 1 + elif self._position == Positions.Short: + a_t = -1 + else: + a_t = 0 + + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + # close short + elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + pt_1 = self.add_buy_fee(self.current_price()) + po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + else: + rw = a_t*(pt_1 - pt)/pt + + return np.clip(rw, 0, 1) + + def profit_only_when_close_reward_aim(self, action): + + if self._last_trade_tick == None: + return 0. 
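+        # reward is only paid out when a position is closed: the fee-adjusted log
+        # return of the trade is returned directly (the profit_aim * rr checks further
+        # down repeat the same condition, so their doubled reward is unreachable here)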
+ + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. + + def profit_only_when_close_reward(self, action): + + if self._last_trade_tick == None: + return 0. + + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. 
\ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py similarity index 55% rename from freqtrade/freqai/prediction_models/RL/RLPrediction_env.py rename to freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py index 2bc7e868f..9b01579e8 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py @@ -2,6 +2,7 @@ import logging import random from collections import deque from enum import Enum +#from sklearn.decomposition import PCA, KernelPCA from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import gym @@ -10,7 +11,6 @@ import numpy as np import pandas as pd from gym import spaces from gym.utils import seeding -from sklearn.decomposition import PCA, KernelPCA logger = logging.getLogger(__name__) @@ -29,12 +29,8 @@ logger = logging.getLogger(__name__) # Label, LabelSet # ) -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 -class Actions_v2(Enum): +class Actions(Enum): Neutral = 0 Long_buy = 1 Long_sell = 2 @@ -75,7 +71,7 @@ class DEnv(gym.Env): # # spaces self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions_v2)) + self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) # episode @@ -152,7 +148,7 @@ class DEnv(gym.Env): trade_type = None - if self.is_tradesignal_v2(action): # exclude 3 case not trade + if self.is_tradesignal(action): # exclude 3 case not trade # Update position """ Action: Neutral, position: Long -> Close Long @@ -167,19 +163,19 @@ class DEnv(gym.Env): temp_position = self._position - if action == Actions_v2.Neutral.value: + if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions_v2.Long_buy.value: + elif action == Actions.Long_buy.value: self._position = Positions.Long trade_type = "long" - elif action == Actions_v2.Short_buy.value: + elif action == Actions.Short_buy.value: self._position = Positions.Short trade_type = "short" - elif action == Actions_v2.Long_sell.value: + elif action == Actions.Long_sell.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions_v2.Short_sell.value: + elif action == Actions.Short_sell.value: self._position = Positions.Neutral trade_type = "neutral" else: @@ -208,11 +204,11 @@ class DEnv(gym.Env): return observation, step_reward, self._done, info - def processState(self, state): - return state.to_numpy() + # def processState(self, state): + # return state.to_numpy() - def convert_mlp_Policy(self, obs_): - pass + # def convert_mlp_Policy(self, obs_): + # pass def _get_observation(self): return self.signal_features[(self._current_tick - self.window_size):self._current_tick] @@ -245,46 +241,26 @@ class DEnv(gym.Env): Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def is_tradesignal_v2(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - 
return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or - - (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or - (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) + (action == Actions.Neutral.value and self._position == Positions.Short) or - def _is_trade_v2(self, action: Actions_v2): - return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Short) or - - (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) ) @@ -292,9 +268,6 @@ class DEnv(gym.Env): return ((action == Actions.Short.value and self._position == Positions.Short) or (action == Actions.Long.value and self._position == Positions.Long)) - def is_hold_v2(self, action): - return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) - or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) def add_buy_fee(self, price): @@ -311,156 +284,158 @@ class DEnv(gym.Env): self.history[key].append(value) - def render(self, mode='human'): + # def render(self, mode='human'): - def _plot_position(position, tick): - color = None - if position == Positions.Short: - color = 'red' - elif position == Positions.Long: - color = 'green' - if color: - plt.scatter(tick, self.prices.loc[tick].open, color=color) + # 
def _plot_position(position, tick): + # color = None + # if position == Positions.Short: + # color = 'red' + # elif position == Positions.Long: + # color = 'green' + # if color: + # plt.scatter(tick, self.prices.loc[tick].open, color=color) - if self._first_rendering: - self._first_rendering = False - plt.cla() - plt.plot(self.prices) - start_position = self._position_history[self._start_tick] - _plot_position(start_position, self._start_tick) + # if self._first_rendering: + # self._first_rendering = False + # plt.cla() + # plt.plot(self.prices) + # start_position = self._position_history[self._start_tick] + # _plot_position(start_position, self._start_tick) - plt.cla() - plt.plot(self.prices) - _plot_position(self._position, self._current_tick) + # plt.cla() + # plt.plot(self.prices) + # _plot_position(self._position, self._current_tick) - plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - plt.pause(0.01) + # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) + # plt.pause(0.01) - def render_all(self): - plt.figure() - window_ticks = np.arange(len(self._position_history)) - plt.plot(self.prices['open'], alpha=0.5) + # def render_all(self): + # plt.figure() + # window_ticks = np.arange(len(self._position_history)) + # plt.plot(self.prices['open'], alpha=0.5) - short_ticks = [] - long_ticks = [] - neutral_ticks = [] - for i, tick in enumerate(window_ticks): - if self._position_history[i] == Positions.Short: - short_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Long: - long_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Neutral: - neutral_ticks.append(tick - 1) + # short_ticks = [] + # long_ticks = [] + # neutral_ticks = [] + # for i, tick in enumerate(window_ticks): + # if self._position_history[i] == Positions.Short: + # short_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Long: + # long_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Neutral: + # neutral_ticks.append(tick - 1) - plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - 'o', color='grey', ms=3, alpha=0.1) - plt.plot(short_ticks, self.prices.loc[short_ticks].open, - 'o', color='r', ms=3, alpha=0.8) - plt.plot(long_ticks, self.prices.loc[long_ticks].open, - 'o', color='g', ms=3, alpha=0.8) + # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + # 'o', color='grey', ms=3, alpha=0.1) + # plt.plot(short_ticks, self.prices.loc[short_ticks].open, + # 'o', color='r', ms=3, alpha=0.8) + # plt.plot(long_ticks, self.prices.loc[long_ticks].open, + # 'o', color='g', ms=3, alpha=0.8) - plt.suptitle("Generalising") - fig = plt.gcf() - fig.set_size_inches(15, 10) + # plt.suptitle("Generalising") + # fig = plt.gcf() + # fig.set_size_inches(15, 10) - def close_trade_report(self): - small_trade = 0 - positive_big_trade = 0 - negative_big_trade = 0 - small_profit = 0.003 - for i in self.close_trade_profit: - if i < small_profit and i > -small_profit: - small_trade+=1 - elif i > small_profit: - positive_big_trade += 1 - elif i < -small_profit: - negative_big_trade += 1 - print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + # def close_trade_report(self): + # small_trade = 0 + # positive_big_trade = 0 + # negative_big_trade = 0 + # small_profit = 0.003 + # for i in 
self.close_trade_profit: + # if i < small_profit and i > -small_profit: + # small_trade+=1 + # elif i > small_profit: + # positive_big_trade += 1 + # elif i < -small_profit: + # negative_big_trade += 1 + # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - def report(self): + # def report(self): - # get total trade - long_trade = 0 - short_trade = 0 - neutral_trade = 0 - for trade in self.trade_history: - if trade['type'] == 'long': - long_trade += 1 + # # get total trade + # long_trade = 0 + # short_trade = 0 + # neutral_trade = 0 + # for trade in self.trade_history: + # if trade['type'] == 'long': + # long_trade += 1 - elif trade['type'] == 'short': - short_trade += 1 - else: - neutral_trade += 1 + # elif trade['type'] == 'short': + # short_trade += 1 + # else: + # neutral_trade += 1 - negative_trade = 0 - positive_trade = 0 - for tr in self.close_trade_profit: - if tr < 0.: - negative_trade += 1 + # negative_trade = 0 + # positive_trade = 0 + # for tr in self.close_trade_profit: + # if tr < 0.: + # negative_trade += 1 - if tr > 0.: - positive_trade += 1 + # if tr > 0.: + # positive_trade += 1 - total_trade_lr = negative_trade+positive_trade + # total_trade_lr = negative_trade+positive_trade - total_trade = long_trade + short_trade - sharp_ratio = self.sharpe_ratio() - sharp_log = self.get_sharpe_ratio() + # total_trade = long_trade + short_trade + # sharp_ratio = self.sharpe_ratio() + # sharp_log = self.get_sharpe_ratio() - from tabulate import tabulate + # from tabulate import tabulate - headers = ["Performance", ""] - performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - ["Total reward", "{0:.3f}".format(self.total_reward)], - ["Start profit(unit)", "{0:.2f}".format(1.)], - ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - ["Sharp log", "{0:.3f}".format(sharp_log)], - # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - ] - tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - print(tabulation) + # headers = ["Performance", ""] + # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + # ["Total reward", "{0:.3f}".format(self.total_reward)], + # ["Start profit(unit)", "{0:.2f}".format(1.)], + # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + # ["Sharp log", "{0:.3f}".format(sharp_log)], + # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + # ] + # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + # print(tabulation) - result = { - "Start": "{0:.2f}".format(1.), - "End": "{0:.2f}".format(self._total_profit), - "Sharp": "{0:.3f}".format(sharp_ratio), - "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) - } - return result + # result = { + # "Start": "{0:.2f}".format(1.), + # "End": "{0:.2f}".format(self._total_profit), + # "Sharp": "{0:.3f}".format(sharp_ratio), + # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) + # } + # return result - def close(self): - plt.close() + # def close(self): + # plt.close() def get_sharpe_ratio(self): return mean_over_std(self.get_portfolio_log_returns()) - def save_rendering(self, filepath): - 
plt.savefig(filepath) + # def save_rendering(self, filepath): + # plt.savefig(filepath) - def pause_rendering(self): - plt.show() + # def pause_rendering(self): + # plt.show() def _calculate_reward(self, action): # rw = self.transaction_profit_reward(action) #rw = self.reward_rr_profit_config(action) - rw = self.reward_rr_profit_config_v2(action) + #rw = self.reward_rr_profit_config(action) # main + #rw = self.profit_only_when_close_reward(action) + rw = self.profit_only_when_close_reward_aim(action) return rw def _update_profit(self, action): #if self._is_trade(action) or self._done: - if self._is_trade_v2(action) or self._done: + if self._is_trade(action) or self._done: pnl = self.get_unrealized_profit() if self._position == Positions.Long: @@ -485,7 +460,7 @@ class DEnv(gym.Env): if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open #if action == Actions.Short.value or action == Actions.Neutral.value: - if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: + if action == Actions.Short_buy.value or action == Actions.Neutral.value: current_price = self.add_sell_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open @@ -500,7 +475,7 @@ class DEnv(gym.Env): if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open #if action == Actions.Long.value or action == Actions.Neutral.value: - if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: + if action == Actions.Long_buy.value or action == Actions.Neutral.value: current_price = self.add_buy_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open @@ -574,8 +549,57 @@ class DEnv(gym.Env): return np.clip(rw, 0, 1) + def profit_only_when_close_reward(self, action): - def reward_rr_profit_config_v2(self, action): + if self._last_trade_tick == None: + return 0. + + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. + + def profit_only_when_close_reward_aim(self, action): + + if self._last_trade_tick == None: + return 0. 
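+        # in the five-action scheme a long is closed by Long_sell and a short by
+        # Short_buy; the reward granted is the fee-adjusted log return of that trade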
+ + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. + + def reward_rr_profit_config(self, action): rw = 0. pt_1 = self.current_price() @@ -587,61 +611,61 @@ class DEnv(gym.Env): pt_1 = self.add_sell_fee(self.current_price()) po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - if action == Actions_v2.Short_buy.value: + if action == Actions.Short_buy.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 1 + rw = 15 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -10 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw = -15 - if action == Actions_v2.Long_sell.value: + if action == Actions.Long_sell.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 3 + rw = 20 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -15 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw = -25 - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 + if action == Actions.Neutral.value: + if self.close_trade_profit[-1] > 0.005: + rw = 0 elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 + rw = 0 # short if self._position == Positions.Short: pt_1 = self.add_sell_fee(self.current_price()) po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - if action == Actions_v2.Long_buy.value: + if action == Actions.Long_buy.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 1 + rw = 15 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < 
(self.profit_aim * -1) * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -10 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw =- -25 - if action == Actions_v2.Short_sell.value: + if action == Actions.Short_sell.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 3 + rw = 20 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -15 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw = -25 - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 + if action == Actions.Neutral.value: + if self.close_trade_profit[-1] > 0.005: + rw = 0 elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 + rw = 0 return np.clip(rw, 0, 1) diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py deleted file mode 100644 index ac91cd200..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py +++ /dev/null @@ -1,645 +0,0 @@ -import gym -from gym import spaces -from gym.utils import seeding -from enum import Enum -from sklearn.decomposition import PCA, KernelPCA -import random -import numpy as np -import pandas as pd -from collections import deque -import matplotlib.pylab as plt -from typing import Dict, List, Tuple, Type, Optional, Any, Union, Callable -import logging - -logger = logging.getLogger(__name__) - -# from bokeh.io import output_notebook -# from bokeh.plotting import figure, show -# from bokeh.models import ( -# CustomJS, -# ColumnDataSource, -# NumeralTickFormatter, -# Span, -# HoverTool, -# Range1d, -# DatetimeTickFormatter, -# Scatter, -# Label, LabelSet -# ) - -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 - -class Actions_v2(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - -class DEnv(gym.Env): - - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee=0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions_v2)) - self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. 
- - self.returns_report = [] - - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - - def reset(self): - - self._done = False - - if self.starting_point == True: - self._position_history = (self._start_tick* [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - #self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. - - self.returns_report = [] - - return self._get_observation() - - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - - - - - trade_type = None - if self.is_tradesignal_v2(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - - temp_position = self._position - if action == Actions_v2.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions_v2.Long_buy.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions_v2.Short_buy.value: - self._position = Positions.Short - trade_type = "short" - elif action == Actions_v2.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions_v2.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type != None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick = self._current_tick, - total_reward = self.total_reward, - total_profit = self._total_profit, - position = self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - - def processState(self, state): - return state.to_numpy() - - def convert_mlp_Policy(self, obs_): - pass - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - - def get_unrealized_profit(self): - - if self._last_trade_tick == None: - return 0. - - if self._position == Positions.Neutral: - return 0. 
- elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price)/last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price)/last_trade_price - else: - return 0. - - - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def is_tradesignal_v2(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or - - (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or - (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) - - - - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) - - def _is_trade_v2(self, action: Actions_v2): - return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Short) or - - (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) - ) - - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def is_hold_v2(self, action): - return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) - or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) - - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, 
value in info.items(): - self.history[key].append(value) - - - def render(self, mode='human'): - - def _plot_position(position, tick): - color = None - if position == Positions.Short: - color = 'red' - elif position == Positions.Long: - color = 'green' - if color: - plt.scatter(tick, self.prices.loc[tick].open, color=color) - - if self._first_rendering: - self._first_rendering = False - plt.cla() - plt.plot(self.prices) - start_position = self._position_history[self._start_tick] - _plot_position(start_position, self._start_tick) - - plt.cla() - plt.plot(self.prices) - _plot_position(self._position, self._current_tick) - - plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - plt.pause(0.01) - - - def render_all(self): - plt.figure() - window_ticks = np.arange(len(self._position_history)) - plt.plot(self.prices['open'], alpha=0.5) - - short_ticks = [] - long_ticks = [] - neutral_ticks = [] - for i, tick in enumerate(window_ticks): - if self._position_history[i] == Positions.Short: - short_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Long: - long_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Neutral: - neutral_ticks.append(tick - 1) - - plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - 'o', color='grey', ms=3, alpha=0.1) - plt.plot(short_ticks, self.prices.loc[short_ticks].open, - 'o', color='r', ms=3, alpha=0.8) - plt.plot(long_ticks, self.prices.loc[long_ticks].open, - 'o', color='g', ms=3, alpha=0.8) - - plt.suptitle("Generalising") - fig = plt.gcf() - fig.set_size_inches(15, 10) - - - - - def close_trade_report(self): - small_trade = 0 - positive_big_trade = 0 - negative_big_trade = 0 - small_profit = 0.003 - for i in self.close_trade_profit: - if i < small_profit and i > -small_profit: - small_trade+=1 - elif i > small_profit: - positive_big_trade += 1 - elif i < -small_profit: - negative_big_trade += 1 - print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - - - def report(self): - - # get total trade - long_trade = 0 - short_trade = 0 - neutral_trade = 0 - for trade in self.trade_history: - if trade['type'] == 'long': - long_trade += 1 - - elif trade['type'] == 'short': - short_trade += 1 - else: - neutral_trade += 1 - - negative_trade = 0 - positive_trade = 0 - for tr in self.close_trade_profit: - if tr < 0.: - negative_trade += 1 - - if tr > 0.: - positive_trade += 1 - - total_trade_lr = negative_trade+positive_trade - - - total_trade = long_trade + short_trade - sharp_ratio = self.sharpe_ratio() - sharp_log = self.get_sharpe_ratio() - - from tabulate import tabulate - - headers = ["Performance", ""] - performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - ["Total reward", "{0:.3f}".format(self.total_reward)], - ["Start profit(unit)", "{0:.2f}".format(1.)], - ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - ["Sharp log", "{0:.3f}".format(sharp_log)], - # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - ] - tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - print(tabulation) - - result = { - "Start": "{0:.2f}".format(1.), - "End": "{0:.2f}".format(self._total_profit), - "Sharp": "{0:.3f}".format(sharp_ratio), - "Winrate": 
"{0:.2f}".format(positive_trade*100/total_trade_lr) - } - return result - - def close(self): - plt.close() - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - - def save_rendering(self, filepath): - plt.savefig(filepath) - - - def pause_rendering(self): - plt.show() - - - def _calculate_reward(self, action): - # rw = self.transaction_profit_reward(action) - #rw = self.reward_rr_profit_config(action) - rw = self.reward_rr_profit_config_v2(action) - return rw - - - def _update_profit(self, action): - #if self._is_trade(action) or self._done: - if self._is_trade_v2(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - - def most_recent_return(self, action): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Short.value or action == Actions.Neutral.value: - if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Long.value or action == Actions.Neutral.value: - if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - - def get_trading_log_return(self): - return self.portfolio_log_returns[self._start_tick:] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick-1].open - - - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. 
+ 1e-9) / (np.std(returns) + 1e-9) - return reward - - def get_bnh_log_return(self): - return np.diff(np.log(self.prices['open'][self._start_tick:])) - - - def transaction_profit_reward(self, action): - rw = 0. - - pt = self.prev_price() - pt_1 = self.current_price() - - - if self._position == Positions.Long: - a_t = 1 - elif self._position == Positions.Short: - a_t = -1 - else: - a_t = 0 - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - # close short - elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - pt_1 = self.add_buy_fee(self.current_price()) - po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - else: - rw = a_t*(pt_1 - pt)/pt - - return np.clip(rw, 0, 1) - - - - def reward_rr_profit_config_v2(self, action): - rw = 0. - - pt_1 = self.current_price() - - - if len(self.close_trade_profit) > 0: - # long - if self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions_v2.Short_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 1 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Long_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 3 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 - elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 - - # short - if self._position == Positions.Short: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions_v2.Long_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 1 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Short_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 3 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 - elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 - - return np.clip(rw, 0, 1) \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index 
5783baba8..60e29d3ab 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -4,29 +4,23 @@ from typing import Any, Dict, Tuple import numpy as np import numpy.typing as npt import pandas as pd +import torch as th from pandas import DataFrame +from stable_baselines3 import PPO +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN -#from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_TDQN import TDQN +from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_5ac import DEnv +#from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_3ac import DEnv from freqtrade.persistence import Trade -from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.monitor import Monitor - -import torch as th -from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, StopTrainingOnRewardThreshold -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3 import PPO - - - logger = logging.getLogger(__name__) - class ReinforcementLearningModel(IFreqaiModel): """ User created Reinforcement Learning Model prediction model. 
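The hunks that follow replace the hard-coded `total_timesteps` and `eval_freq` with values derived from the dataset length ("cycles" over the train/test dataframes). A rough sketch of how those pieces fit together as a stable-baselines3 training call; `train_cycles`/`eval_cycles`, the `[256, 256, 128]` ReLU network, and the learning rate/gamma mirror the diff, while the function wrapper itself is only illustrative:

    import torch as th
    from stable_baselines3 import PPO
    from stable_baselines3.common.callbacks import EvalCallback

    def train_sketch(train_env, eval_env, train_df, test_df, agent_params, path):
        # one "cycle" corresponds to one pass over the matching dataframe
        total_timesteps = int(agent_params["train_cycles"] * len(train_df))
        eval_freq = int(agent_params["eval_cycles"] * len(test_df))

        eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/",
                                     eval_freq=eval_freq, deterministic=True, render=False)

        policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128])
        model = PPO('MultiInputPolicy', train_env,   # policy string follows the diff; TDQN uses 'TMultiInputPolicy'
                    policy_kwargs=policy_kwargs, learning_rate=0.00025, gamma=0.9)
        model.learn(total_timesteps=total_timesteps, callback=eval_callback)
        return model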
@@ -87,30 +81,22 @@ class ReinforcementLearningModel(IFreqaiModel): # # train_labels = data_dictionary["train_labels"] # test_df = data_dictionary["test_features"] # # test_labels = data_dictionary["test_labels"] - # # sep = '/' # # coin = pair.split(sep, 1)[0] # # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] # # price.reset_index(inplace=True, drop=True) # # price = price.to_frame() # price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - + # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) # #train_env = GymAnytrading(train_df, price, self.CONV_WIDTH) - # agent_params = self.freqai_info['model_training_parameters'] # reward_params = self.freqai_info['model_reward_parameters'] - - # train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) # #eval_env = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - # #env_instance = SubprocVecEnv([DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params)]) # #train_env.reset() # #eval_env.reset() - # # model - # #policy_kwargs = dict(net_arch=[512, 512, 512]) # policy_kwargs = dict(activation_fn=th.nn.Tanh, # net_arch=[256, 256, 256]) @@ -124,27 +110,22 @@ class ReinforcementLearningModel(IFreqaiModel): # tb_log_name=model_name, # model_kwargs=agent_params, # train_df=train_df, - # test_df=test_df, - # price=price, - # price_test=price_test, + # test_df=test_df, + # price=price, + # price_test=price_test, # window_size=self.CONV_WIDTH) - - # # best_model = eval_agent.train_model(model=model, # # tb_log_name=model_name, # # model_kwargs=agent_params, # # eval=eval_env) - - # # TDQN # # model_name = 'TDQN' # # model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log='./tensorboard_log/', # # learning_rate=agent_params["learning_rate"], gamma=0.9, - # # target_update_interval=5000, buffer_size=50000, + # # target_update_interval=5000, buffer_size=50000, # # exploration_initial_eps=1, exploration_final_eps=0.1, # # replay_buffer_class=ReplayBuffer # # ) - # # trained_model = agent.train_model(model=model, # # tb_log_name=model_name, # # model_kwargs=agent_params) @@ -157,11 +138,13 @@ class ReinforcementLearningModel(IFreqaiModel): reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) # price data for model training and evaluation price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) + # environments train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) eval = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) @@ -173,19 +156,17 @@ class ReinforcementLearningModel(IFreqaiModel): path = self.dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/{agent_type}/logs/", eval_freq=10000, + 
log_path=f"{path}/{agent_type}/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) - # model arch - policy_kwargs = dict(activation_fn=th.nn.Tanh, - net_arch=[512, 512, 512]) - + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) if agent_type == 'tdqn': model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", learning_rate=0.00025, gamma=0.9, - target_update_interval=5000, buffer_size=50000, + target_update_interval=5000, buffer_size=50000, exploration_initial_eps=1, exploration_final_eps=0.1, replay_buffer_class=ReplayBuffer ) @@ -193,9 +174,9 @@ class ReinforcementLearningModel(IFreqaiModel): model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", learning_rate=0.00025, gamma=0.9 ) - + model.learn( - total_timesteps=agent_params["total_timesteps"], + total_timesteps=int(total_timesteps), callback=eval_callback ) From 70b25461f06b6a555d2cc21c3910834514df996a Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Sun, 14 Aug 2022 20:47:58 +0700 Subject: [PATCH 009/421] add rl dependency --- requirements-freqai.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 26e4617af..8d8bb03c5 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -6,3 +6,6 @@ scikit-learn==1.1.2 joblib==1.1.0 catboost==1.0.6; platform_machine != 'aarch64' lightgbm==3.3.2 +torch==1.12.1 +stable-baselines3==1.5.0 +gym==0.21.0 \ No newline at end of file From ecd1f55abc45c677cb688040c01ddfc255dab40d Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Sun, 14 Aug 2022 21:26:34 +0700 Subject: [PATCH 010/421] add rl module --- freqtrade/freqai/prediction_models/ReinforcementLearning.py | 2 +- freqtrade/freqai/rl/BaseRLAgent.py | 0 freqtrade/freqai/rl/__init__.py | 0 3 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 freqtrade/freqai/rl/BaseRLAgent.py create mode 100644 freqtrade/freqai/rl/__init__.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index 60e29d3ab..6ced4749e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -21,7 +21,7 @@ from freqtrade.persistence import Trade logger = logging.getLogger(__name__) -class ReinforcementLearningModel(IFreqaiModel): +class ReinforcementLearning(IFreqaiModel): """ User created Reinforcement Learning Model prediction model. 
""" diff --git a/freqtrade/freqai/rl/BaseRLAgent.py b/freqtrade/freqai/rl/BaseRLAgent.py new file mode 100644 index 000000000..e69de29bb diff --git a/freqtrade/freqai/rl/__init__.py b/freqtrade/freqai/rl/__init__.py new file mode 100644 index 000000000..e69de29bb From 91683e1dcaa0b966b813d5e418834136828b0cad Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 10:26:44 +0200 Subject: [PATCH 011/421] restructure RL so that user can customize environment --- freqtrade/freqai/RL/BaseRLEnv.py | 318 +++++++++ .../RL/BaseReinforcementLearningModel.py | 230 ++++++ .../TDQNagent.py} | 29 +- freqtrade/freqai/{rl => RL}/__init__.py | 0 .../RL/RLPrediction_agent.py | 139 ---- .../RL/RLPrediction_env_TDQN_3ac.py | 513 ------------- .../RL/RLPrediction_env_TDQN_5ac.py | 671 ------------------ .../freqai/prediction_models/RL/config.py | 37 - .../prediction_models/RLPredictionModel.py | 253 ------- .../ReinforcementLearning.py | 273 ------- .../ReinforcementLearningPPO.py | 155 ++++ .../ReinforcementLearningTDQN.py | 168 +++++ freqtrade/freqai/rl/BaseRLAgent.py | 0 13 files changed, 882 insertions(+), 1904 deletions(-) create mode 100644 freqtrade/freqai/RL/BaseRLEnv.py create mode 100644 freqtrade/freqai/RL/BaseReinforcementLearningModel.py rename freqtrade/freqai/{prediction_models/RL/RLPrediction_agent_TDQN.py => RL/TDQNagent.py} (93%) rename freqtrade/freqai/{rl => RL}/__init__.py (100%) delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py delete mode 100644 freqtrade/freqai/prediction_models/RL/config.py delete mode 100644 freqtrade/freqai/prediction_models/RLPredictionModel.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py delete mode 100644 freqtrade/freqai/rl/BaseRLAgent.py diff --git a/freqtrade/freqai/RL/BaseRLEnv.py b/freqtrade/freqai/RL/BaseRLEnv.py new file mode 100644 index 000000000..607262acd --- /dev/null +++ b/freqtrade/freqai/RL/BaseRLEnv.py @@ -0,0 +1,318 @@ +import logging +from enum import Enum +# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + +import gym +import numpy as np +from gym import spaces +from gym.utils import seeding + +logger = logging.getLogger(__name__) + + +class Actions(Enum): + Short = 0 + Long = 1 + Neutral = 2 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + + +class BaseRLEnv(gym.Env): + + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, 
dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed: int = 1): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + + def step(self, action: int): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self.calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. 
+ elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. + + def is_tradesignal(self, action: int): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. + + def _update_profit(self, action): + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action: int): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. 
+ The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. + """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Short.value or action == Actions.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Long.value or action == Actions.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py new file mode 100644 index 000000000..accddc94d --- /dev/null +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -0,0 +1,230 @@ +import logging +from typing import Any, Dict, Tuple + +import numpy as np +import numpy.typing as npt +import pandas as pd +from pandas import DataFrame +from abc import abstractmethod +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.freqai_interface import IFreqaiModel +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.persistence import Trade + +logger = logging.getLogger(__name__) + + +class BaseReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + @abstractmethod + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + """ + Agent customizations and abstract Reinforcement Learning customizations + go in here. Abstract method, so this function must be overridden by + user class. + """ + + return + + def get_state_info(self, pair): + open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) + market_side = 0.5 + current_profit = 0 + for trade in open_trades: + if trade.pair == pair: + current_value = trade.open_trade_value + openrate = trade.open_rate + if 'long' in trade.enter_tag: + market_side = 1 + else: + market_side = 0 + current_profit = current_value / openrate - 1 + + total_profit = 0 + closed_trades = Trade.get_trades( + trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) + for trade in closed_trades: + total_profit += trade.close_profit + + return market_side, current_profit, total_profit + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 + + +class MyRLEnv(BaseRLEnv): + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return 
observation, step_reward, self._done, info diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py b/freqtrade/freqai/RL/TDQNagent.py similarity index 93% rename from freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py rename to freqtrade/freqai/RL/TDQNagent.py index 0aa3512a1..584f6a8ef 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py +++ b/freqtrade/freqai/RL/TDQNagent.py @@ -6,11 +6,10 @@ import torch as th from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.policies import BasePolicy -from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, CombinedExtractor, +from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, FlattenExtractor) from stable_baselines3.common.type_aliases import GymEnv, Schedule -#from stable_baselines3.common.policies import register_policy -from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy, +from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, QNetwork) from torch import nn @@ -47,16 +46,17 @@ def create_mlp_( ] return modules + class TDQNetwork(QNetwork): def __init__(self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True - ): + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + features_extractor: nn.Module, + features_dim: int, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + normalize_images: bool = True + ): super().__init__( observation_space=observation_space, action_space=action_space, @@ -211,10 +211,3 @@ class TDQN(DQN): device=device, _init_setup_model=_init_setup_model ) - - - -# try: -# register_policy("TMultiInputPolicy", TMultiInputPolicy) -# except: -# print("already registered") diff --git a/freqtrade/freqai/rl/__init__.py b/freqtrade/freqai/RL/__init__.py similarity index 100% rename from freqtrade/freqai/rl/__init__.py rename to freqtrade/freqai/RL/__init__.py diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py deleted file mode 100644 index 26b31f6e9..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py +++ /dev/null @@ -1,139 +0,0 @@ -# common library - -import gym -import numpy as np -from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3 -from stable_baselines3.common.callbacks import (BaseCallback, CallbackList, CheckpointCallback, - EvalCallback, StopTrainingOnRewardThreshold) -from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise - -from freqtrade.freqai.prediction_models.RL import config -#from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv - - -# from stable_baselines3.common.vec_env import DummyVecEnv - -# from meta.env_stock_trading.env_stock_trading import StockTradingEnv - -# RL models from stable-baselines - - -MODELS = {"a2c": A2C, "ddpg": DDPG, "td3": TD3, "sac": SAC, "ppo": PPO} - - -MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()} - - -NOISE = { - "normal": NormalActionNoise, - "ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise, -} - - -class TensorboardCallback(BaseCallback): - 
""" - Custom callback for plotting additional values in tensorboard. - """ - - def __init__(self, verbose=0): - super(TensorboardCallback, self).__init__(verbose) - - def _on_step(self) -> bool: - try: - self.logger.record(key="train/reward", value=self.locals["rewards"][0]) - except BaseException: - self.logger.record(key="train/reward", value=self.locals["reward"][0]) - return True - - -class RLPrediction_agent: - """Provides implementations for DRL algorithms - Based on: - https://github.com/AI4Finance-Foundation/FinRL-Meta/blob/master/agents/stablebaselines3_models.py - Attributes - ---------- - env: gym environment class - user-defined class - - Methods - ------- - get_model() - setup DRL algorithms - train_model() - train DRL algorithms in a train dataset - and output the trained model - DRL_prediction() - make a prediction in a test dataset and get results - """ - - def __init__(self, env): - self.env = env - - def get_model( - self, - model_name, - policy="MlpPolicy", - policy_kwargs=None, - model_kwargs=None, - reward_kwargs=None, - #total_timesteps=None, - verbose=1, - seed=None - ): - if model_name not in MODELS: - raise NotImplementedError("NotImplementedError") - - if model_kwargs is None: - model_kwargs = MODEL_KWARGS[model_name] - - if "action_noise" in model_kwargs: - n_actions = self.env.action_space.shape[-1] - model_kwargs["action_noise"] = NOISE[model_kwargs["action_noise"]]( - mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions) - ) - print(model_kwargs) - model = MODELS[model_name]( - policy=policy, - env=self.env, - tensorboard_log=f"{config.TENSORBOARD_LOG_DIR}/{model_name}", - verbose=verbose, - policy_kwargs=policy_kwargs, - #model_kwargs=model_kwargs, - #total_timesteps=model_kwargs["total_timesteps"], - seed=seed - #**model_kwargs, - ) - - - - - return model - - def train_model(self, model, tb_log_name, model_kwargs, train_df, test_df, price, price_test, window_size): - - - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] - train_env = DEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params) - eval_env = DEnv(df=test_df, prices=price_test, window_size=window_size, reward_kwargs=reward_params) - - # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/', - # name_prefix='rl_model') - - checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') - - eval_callback = EvalCallback(eval_env, best_model_save_path='./logs/best_model', log_path='./logs/results', eval_freq=500) - #callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=1) - - # Create the callback list - callback = CallbackList([checkpoint_callback, eval_callback]) - - - model = model.learn( - total_timesteps=model_kwargs["total_timesteps"], - tb_log_name=tb_log_name, - callback=callback, - #callback=TensorboardCallback(), - ) - return model diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py deleted file mode 100644 index 184ec57ec..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py +++ /dev/null @@ -1,513 +0,0 @@ -import logging -import random -from collections import deque -from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union - -import gym -import matplotlib.pylab as plt -import numpy as np -import pandas as pd -from gym import spaces -from gym.utils import 
seeding - -logger = logging.getLogger(__name__) - -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - -class DEnv(gym.Env): - - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee=0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point == True: - self._position_history = (self._start_tick* [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - #self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. 
- - self.returns_report = [] - - return self._get_observation() - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - temp_position = self._position - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type != None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick = self._current_tick, - total_reward = self.total_reward, - total_profit = self._total_profit, - position = self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - # def processState(self, state): - # return state.to_numpy() - - # def convert_mlp_Policy(self, obs_): - # pass - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - def get_unrealized_profit(self): - - if self._last_trade_tick == None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price)/last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price)/last_trade_price - else: - return 0. 
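As a quick numeric check of the fee handling in `get_unrealized_profit` above (the prices are invented; `fee = 0.0015` as set in `__init__`): a long entered at 100 and marked at 101 yields roughly 0.7% instead of the raw 1%, because the entry is inflated by `add_buy_fee` and the exit is deflated by `add_sell_fee`.

    fee = 0.0015
    entry = 100.00 * (1 + fee)      # add_buy_fee() on the long entry   -> 100.15
    mark = 101.00 / (1 + fee)       # add_sell_fee() on the current open -> ~100.849
    pnl = (mark - entry) / entry    # ~0.00698, i.e. ~0.7% instead of the raw 1%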
- - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - - # def render(self, mode='human'): - # def _plot_position(position, tick): - # color = None - # if position == Positions.Short: - # color = 'red' - # elif position == Positions.Long: - # color = 'green' - # if color: - # plt.scatter(tick, self.prices.loc[tick].open, color=color) - # if self._first_rendering: - # self._first_rendering = False - # plt.cla() - # plt.plot(self.prices) - # start_position = self._position_history[self._start_tick] - # _plot_position(start_position, self._start_tick) - # plt.cla() - # plt.plot(self.prices) - # _plot_position(self._position, self._current_tick) - # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - # plt.pause(0.01) - - # def render_all(self): - # plt.figure() - # window_ticks = np.arange(len(self._position_history)) - # plt.plot(self.prices['open'], alpha=0.5) - # short_ticks = [] - # long_ticks = [] - # neutral_ticks = [] - # for i, tick in enumerate(window_ticks): - # if self._position_history[i] == Positions.Short: - # short_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Long: - # long_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Neutral: - # neutral_ticks.append(tick - 1) - # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - # 'o', color='grey', ms=3, alpha=0.1) - # plt.plot(short_ticks, self.prices.loc[short_ticks].open, - # 'o', color='r', ms=3, alpha=0.8) - # plt.plot(long_ticks, self.prices.loc[long_ticks].open, - # 'o', color='g', ms=3, alpha=0.8) - # plt.suptitle("Generalising") - # fig = plt.gcf() - # fig.set_size_inches(15, 10) - - # def close_trade_report(self): - # small_trade = 0 - # positive_big_trade = 0 - # negative_big_trade = 0 - # small_profit = 0.003 - # for i in self.close_trade_profit: - # if i < small_profit and i > -small_profit: - # small_trade+=1 - # elif i > small_profit: - # positive_big_trade += 1 - # elif i < -small_profit: - # negative_big_trade += 1 - # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - - # def report(self): - # # get total 
trade - # long_trade = 0 - # short_trade = 0 - # neutral_trade = 0 - # for trade in self.trade_history: - # if trade['type'] == 'long': - # long_trade += 1 - # elif trade['type'] == 'short': - # short_trade += 1 - # else: - # neutral_trade += 1 - # negative_trade = 0 - # positive_trade = 0 - # for tr in self.close_trade_profit: - # if tr < 0.: - # negative_trade += 1 - # if tr > 0.: - # positive_trade += 1 - # total_trade_lr = negative_trade+positive_trade - # total_trade = long_trade + short_trade - # sharp_ratio = self.sharpe_ratio() - # sharp_log = self.get_sharpe_ratio() - # from tabulate import tabulate - # headers = ["Performance", ""] - # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - # ["Total reward", "{0:.3f}".format(self.total_reward)], - # ["Start profit(unit)", "{0:.2f}".format(1.)], - # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - # ["Sharp log", "{0:.3f}".format(sharp_log)], - # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - # ] - # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - # print(tabulation) - # result = { - # "Start": "{0:.2f}".format(1.), - # "End": "{0:.2f}".format(self._total_profit), - # "Sharp": "{0:.3f}".format(sharp_ratio), - # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) - # } - # return result - - # def close(self): - # plt.close() - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - # def save_rendering(self, filepath): - # plt.savefig(filepath) - - # def pause_rendering(self): - # plt.show() - - def _calculate_reward(self, action): - # rw = self.transaction_profit_reward(action) - #rw = self.reward_rr_profit_config(action) - rw = self.profit_only_when_close_reward(action) - #rw = self.profit_only_when_close_reward_aim(action) - return rw - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
- """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - # def get_trading_log_return(self): - # return self.portfolio_log_returns[self._start_tick:] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick-1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward - - # def get_bnh_log_return(self): - # return np.diff(np.log(self.prices['open'][self._start_tick:])) - - def transaction_profit_reward(self, action): - rw = 0. - - pt = self.prev_price() - pt_1 = self.current_price() - - - if self._position == Positions.Long: - a_t = 1 - elif self._position == Positions.Short: - a_t = -1 - else: - a_t = 0 - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - # close short - elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - pt_1 = self.add_buy_fee(self.current_price()) - po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - else: - rw = a_t*(pt_1 - pt)/pt - - return np.clip(rw, 0, 1) - - def profit_only_when_close_reward_aim(self, action): - - if self._last_trade_tick == None: - return 0. 
- - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. - - def profit_only_when_close_reward(self, action): - - if self._last_trade_tick == None: - return 0. - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. 
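Both reward variants above pay out nothing while a position is held and only return the fee-adjusted log return of the trade once the agent closes it; the `_aim` variant additionally doubles that payout when the recorded trade profit clears the configured target of profit_aim * rr. A condensed sketch of that shaping for the long side, with assumed fee and parameter values rather than anything read from the patch:

    import numpy as np

    # Illustrative sketch only -- the close-long branch of the rewards above, collapsed
    # into one pure function. fee, profit_aim and rr are assumed example values.
    def close_long_reward(entry_open: float, exit_open: float,
                          closed_profit: float,
                          fee: float = 0.0015,
                          profit_aim: float = 0.02, rr: float = 1.0) -> float:
        entry = entry_open * (1 + fee)        # buy fee on the entry
        exit_price = exit_open / (1 + fee)    # sell fee on the exit
        reward = float(np.log(exit_price) - np.log(entry))
        if closed_profit > profit_aim * rr:   # the *_aim variant doubles the payout
            reward *= 2
        return reward

    # close_long_reward(100.0, 103.0, closed_profit=0.025) -> ~0.053 (doubled)
    # close_long_reward(100.0, 103.0, closed_profit=0.010) -> ~0.027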
\ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py deleted file mode 100644 index 9b01579e8..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py +++ /dev/null @@ -1,671 +0,0 @@ -import logging -import random -from collections import deque -from enum import Enum -#from sklearn.decomposition import PCA, KernelPCA -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union - -import gym -import matplotlib.pylab as plt -import numpy as np -import pandas as pd -from gym import spaces -from gym.utils import seeding - - -logger = logging.getLogger(__name__) - -# from bokeh.io import output_notebook -# from bokeh.plotting import figure, show -# from bokeh.models import ( -# CustomJS, -# ColumnDataSource, -# NumeralTickFormatter, -# Span, -# HoverTool, -# Range1d, -# DatetimeTickFormatter, -# Scatter, -# Label, LabelSet -# ) - - -class Actions(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - -class DEnv(gym.Env): - - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee=0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - - def reset(self): - - self._done = False - - if self.starting_point == True: - self._position_history = (self._start_tick* [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - #self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. 
- - self.returns_report = [] - - return self._get_observation() - - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - - - - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - - temp_position = self._position - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long_buy.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short_buy.value: - self._position = Positions.Short - trade_type = "short" - elif action == Actions.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type != None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick = self._current_tick, - total_reward = self.total_reward, - total_profit = self._total_profit, - position = self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - - # def processState(self, state): - # return state.to_numpy() - - # def convert_mlp_Policy(self, obs_): - # pass - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - - def get_unrealized_profit(self): - - if self._last_trade_tick == None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price)/last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price)/last_trade_price - else: - return 0. 
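The five-action variant above splits entries and exits into explicit buy/sell actions instead of inferring them from a single Long/Short/Neutral signal. As a reading aid, the position transitions implied by its step() and is_tradesignal() can be written out as a small lookup table; this is a sketch of the deleted environment's behaviour, not code from the patch:

    from enum import Enum

    # Illustrative sketch only -- enums copied from the deleted 5-action environment,
    # transition table inferred from its step()/is_tradesignal() logic.
    class Actions(Enum):
        Neutral = 0
        Long_buy = 1
        Long_sell = 2
        Short_buy = 3
        Short_sell = 4

    class Positions(Enum):
        Short = 0
        Long = 1
        Neutral = 0.5

    TARGET_POSITION = {
        Actions.Neutral: Positions.Neutral,     # close whatever is open
        Actions.Long_buy: Positions.Long,       # open a long
        Actions.Long_sell: Positions.Neutral,   # close a long
        Actions.Short_buy: Positions.Short,     # open a short
        Actions.Short_sell: Positions.Neutral,  # close a short
    }

    def next_position(action: Actions, position: Positions) -> Positions:
        # In the environment above, is_tradesignal() only treats two cases as a trade:
        # a Neutral action while a position is open (close it), and a buy/sell action
        # while flat (open a position or stay flat). Everything else is a hold.
        is_trade = (action == Actions.Neutral) != (position == Positions.Neutral)
        return TARGET_POSITION[action] if is_trade else position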
- - - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or - - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) - - - def _is_trade(self, action: Actions): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) - - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - - # def render(self, mode='human'): - - # def _plot_position(position, tick): - # color = None - # if position == Positions.Short: - # color = 'red' - # elif position == Positions.Long: - # color = 'green' - # if color: - # plt.scatter(tick, self.prices.loc[tick].open, color=color) - - # if self._first_rendering: - # self._first_rendering = False - # plt.cla() - # plt.plot(self.prices) - # start_position = self._position_history[self._start_tick] - # _plot_position(start_position, self._start_tick) - - # plt.cla() - # plt.plot(self.prices) - # _plot_position(self._position, self._current_tick) - - # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - # plt.pause(0.01) - - - # def render_all(self): - # plt.figure() - # window_ticks = np.arange(len(self._position_history)) - # plt.plot(self.prices['open'], alpha=0.5) - - # short_ticks = [] - # long_ticks = [] - # neutral_ticks = [] - # for i, tick in enumerate(window_ticks): - # if self._position_history[i] == Positions.Short: - # short_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Long: - # long_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Neutral: - # neutral_ticks.append(tick - 1) - - # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - # 'o', color='grey', ms=3, alpha=0.1) - # plt.plot(short_ticks, self.prices.loc[short_ticks].open, - # 'o', color='r', ms=3, alpha=0.8) - # plt.plot(long_ticks, self.prices.loc[long_ticks].open, - # 'o', color='g', ms=3, alpha=0.8) - - # 
plt.suptitle("Generalising") - # fig = plt.gcf() - # fig.set_size_inches(15, 10) - - - - - # def close_trade_report(self): - # small_trade = 0 - # positive_big_trade = 0 - # negative_big_trade = 0 - # small_profit = 0.003 - # for i in self.close_trade_profit: - # if i < small_profit and i > -small_profit: - # small_trade+=1 - # elif i > small_profit: - # positive_big_trade += 1 - # elif i < -small_profit: - # negative_big_trade += 1 - # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - - - # def report(self): - - # # get total trade - # long_trade = 0 - # short_trade = 0 - # neutral_trade = 0 - # for trade in self.trade_history: - # if trade['type'] == 'long': - # long_trade += 1 - - # elif trade['type'] == 'short': - # short_trade += 1 - # else: - # neutral_trade += 1 - - # negative_trade = 0 - # positive_trade = 0 - # for tr in self.close_trade_profit: - # if tr < 0.: - # negative_trade += 1 - - # if tr > 0.: - # positive_trade += 1 - - # total_trade_lr = negative_trade+positive_trade - - - # total_trade = long_trade + short_trade - # sharp_ratio = self.sharpe_ratio() - # sharp_log = self.get_sharpe_ratio() - - # from tabulate import tabulate - - # headers = ["Performance", ""] - # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - # ["Total reward", "{0:.3f}".format(self.total_reward)], - # ["Start profit(unit)", "{0:.2f}".format(1.)], - # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - # ["Sharp log", "{0:.3f}".format(sharp_log)], - # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - # ] - # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - # print(tabulation) - - # result = { - # "Start": "{0:.2f}".format(1.), - # "End": "{0:.2f}".format(self._total_profit), - # "Sharp": "{0:.3f}".format(sharp_ratio), - # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) - # } - # return result - - # def close(self): - # plt.close() - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - - # def save_rendering(self, filepath): - # plt.savefig(filepath) - - - # def pause_rendering(self): - # plt.show() - - - def _calculate_reward(self, action): - # rw = self.transaction_profit_reward(action) - #rw = self.reward_rr_profit_config(action) - #rw = self.reward_rr_profit_config(action) # main - #rw = self.profit_only_when_close_reward(action) - rw = self.profit_only_when_close_reward_aim(action) - return rw - - - def _update_profit(self, action): - #if self._is_trade(action) or self._done: - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - - def most_recent_return(self, action): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. 
- The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Short.value or action == Actions.Neutral.value: - if action == Actions.Short_buy.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Long.value or action == Actions.Neutral.value: - if action == Actions.Long_buy.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - - def get_trading_log_return(self): - return self.portfolio_log_returns[self._start_tick:] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick-1].open - - - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward - - def get_bnh_log_return(self): - return np.diff(np.log(self.prices['open'][self._start_tick:])) - - - def transaction_profit_reward(self, action): - rw = 0. - - pt = self.prev_price() - pt_1 = self.current_price() - - - if self._position == Positions.Long: - a_t = 1 - elif self._position == Positions.Short: - a_t = -1 - else: - a_t = 0 - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - # close short - elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - pt_1 = self.add_buy_fee(self.current_price()) - po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - else: - rw = a_t*(pt_1 - pt)/pt - - return np.clip(rw, 0, 1) - - - def profit_only_when_close_reward(self, action): - - if self._last_trade_tick == None: - return 0. 
- - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. - - def profit_only_when_close_reward_aim(self, action): - - if self._last_trade_tick == None: - return 0. - - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. - - def reward_rr_profit_config(self, action): - rw = 0. 
- - pt_1 = self.current_price() - - - if len(self.close_trade_profit) > 0: - # long - if self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions.Short_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 15 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -10 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -15 - - if action == Actions.Long_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 20 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -15 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -25 - - if action == Actions.Neutral.value: - if self.close_trade_profit[-1] > 0.005: - rw = 0 - elif self.close_trade_profit[-1] < 0: - rw = 0 - - # short - if self._position == Positions.Short: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions.Long_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 15 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -10 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw =- -25 - - if action == Actions.Short_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 20 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -15 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -25 - - if action == Actions.Neutral.value: - if self.close_trade_profit[-1] > 0.005: - rw = 0 - elif self.close_trade_profit[-1] < 0: - rw = 0 - - return np.clip(rw, 0, 1) diff --git a/freqtrade/freqai/prediction_models/RL/config.py b/freqtrade/freqai/prediction_models/RL/config.py deleted file mode 100644 index c45eb2387..000000000 --- a/freqtrade/freqai/prediction_models/RL/config.py +++ /dev/null @@ -1,37 +0,0 @@ -# dir -DATA_SAVE_DIR = "datasets" -TRAINED_MODEL_DIR = "trained_models" -TENSORBOARD_LOG_DIR = "tensorboard_log" -RESULTS_DIR = "results" - -# Model Parameters -A2C_PARAMS = {"n_steps": 5, "ent_coef": 0.01, "learning_rate": 0.0007} -PPO_PARAMS = { - "n_steps": 2048, - "ent_coef": 0.01, - "learning_rate": 0.00025, - "batch_size": 64, -} -DDPG_PARAMS = {"batch_size": 128, "buffer_size": 50000, "learning_rate": 0.001} -TD3_PARAMS = { - "batch_size": 100, - "buffer_size": 1000000, - "learning_rate": 0.001, -} -SAC_PARAMS = { - "batch_size": 64, - "buffer_size": 100000, - "learning_rate": 0.0001, - "learning_starts": 100, - "ent_coef": "auto_0.1", -} -ERL_PARAMS = { - "learning_rate": 3e-5, - "batch_size": 2048, - "gamma": 0.985, - "seed": 312, - "net_dimension": 512, - "target_step": 5000, - "eval_gap": 30, -} -RLlib_PARAMS = {"lr": 5e-5, "train_batch_size": 500, "gamma": 0.99} diff --git a/freqtrade/freqai/prediction_models/RLPredictionModel.py b/freqtrade/freqai/prediction_models/RLPredictionModel.py deleted file mode 100644 index b6903dd43..000000000 --- a/freqtrade/freqai/prediction_models/RLPredictionModel.py 
+++ /dev/null @@ -1,253 +0,0 @@ -import logging -from typing import Any, Dict, Tuple -#from matplotlib.colors import DivergingNorm - -from pandas import DataFrame -import pandas as pd -from freqtrade.exceptions import OperationalException -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import tensorflow as tf -from freqtrade.freqai.prediction_models.BaseTensorFlowModel import BaseTensorFlowModel -from freqtrade.freqai.freqai_interface import IFreqaiModel -from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout -from tensorflow.keras.models import Model -import numpy as np -import copy - -from keras.layers import * -import random - - -logger = logging.getLogger(__name__) - -# tf.config.run_functions_eagerly(True) -# tf.data.experimental.enable_debug_mode() - -import os -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - -MAX_EPOCHS = 10 -LOOKBACK = 8 - - -class RLPredictionModel_v2(IFreqaiModel): - """ - User created prediction model. The class needs to override three necessary - functions, predict(), fit(). - """ - - def fit(self, data_dictionary: Dict, pair) -> Any: - """ - User sets up the training and test data to fit their desired model here - :params: - :data_dictionary: the dictionary constructed by DataHandler to hold - all the training and test data/labels. - """ - - train_df = data_dictionary["train_features"] - train_labels = data_dictionary["train_labels"] - test_df = data_dictionary["test_features"] - test_labels = data_dictionary["test_labels"] - n_labels = len(train_labels.columns) - if n_labels > 1: - raise OperationalException( - "Neural Net not yet configured for multi-targets. Please " - " reduce number of targets to 1 in strategy." - ) - - n_features = len(data_dictionary["train_features"].columns) - BATCH_SIZE = self.freqai_info.get("batch_size", 64) - input_dims = [BATCH_SIZE, self.CONV_WIDTH, n_features] - - - w1 = WindowGenerator( - input_width=self.CONV_WIDTH, - label_width=1, - shift=1, - train_df=train_df, - val_df=test_df, - train_labels=train_labels, - val_labels=test_labels, - batch_size=BATCH_SIZE, - ) - - - # train_agent() - #pair = self.dd.historical_data[pair] - #gym_env = FreqtradeEnv(data=train_df, prices=0.01, windows_size=100, pair=pair, stake_amount=100) - - # sep = '/' - # coin = pair.split(sep, 1)[0] - - # # df1 = train_df.filter(regex='price') - # # df2 = df1.filter(regex='raw') - - # # df3 = df2.filter(regex=f"{coin}") - # # print(df3) - - # price = train_df[f"%-{coin}raw_price_5m"] - # gym_env = RLPrediction_GymAnytrading(signal_features=train_df, prices=price, window_size=100) - # sac = RLPrediction_Agent(gym_env) - - # print(sac) - - # return 0 - - - - return model - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first=True - ) -> Tuple[DataFrame, DataFrame]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :predictions: np.array of predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - if first: - full_df = dk.data_dictionary["prediction_features"] - - w1 = WindowGenerator( - input_width=self.CONV_WIDTH, - label_width=1, - shift=1, - test_df=full_df, - batch_size=len(full_df), - ) - - predictions = self.model.predict(w1.inference) - len_diff = len(dk.do_predict) - len(predictions) - if len_diff > 0: - dk.do_predict = dk.do_predict[len_diff:] - - else: - data = dk.data_dictionary["prediction_features"] - data = tf.expand_dims(data, axis=0) - predictions = self.model(data, training=False) - - predictions = predictions[:, 0] - pred_df = DataFrame(predictions, columns=dk.label_list) - - pred_df = dk.denormalize_labels_from_metadata(pred_df) - - return (pred_df, np.ones(len(pred_df))) - - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pass - # w1 = WindowGenerator( - # input_width=self.CONV_WIDTH, label_width=1, shift=1, test_df=df, batch_size=len(df) - # ) - - # trained_predictions = model.predict(w1.inference) - # #trained_predictions = trained_predictions[:, 0, 0] - # trained_predictions = trained_predictions[:, 0] - - # n_lost_points = len(df) - len(trained_predictions) - # pred_df = DataFrame(trained_predictions, columns=dk.label_list) - # zeros_df = DataFrame(np.zeros((n_lost_points, len(dk.label_list))), columns=dk.label_list) - # pred_df = pd.concat([zeros_df, pred_df], axis=0) - - # pred_df = dk.denormalize_labels_from_metadata(pred_df) - - - - # self.dd.historic_predictions[pair] = DataFrame() - # self.dd.historic_predictions[pair] = copy.deepcopy(pred_df) - - -class WindowGenerator: - def __init__( - self, - input_width, - label_width, - shift, - train_df=None, - val_df=None, - test_df=None, - train_labels=None, - val_labels=None, - test_labels=None, - batch_size=None, - ): - # Store the raw data. 
- self.train_df = train_df - self.val_df = val_df - self.test_df = test_df - self.train_labels = train_labels - self.val_labels = val_labels - self.test_labels = test_labels - self.batch_size = batch_size - self.input_width = input_width - self.label_width = label_width - self.shift = shift - - self.total_window_size = input_width + shift - - self.input_slice = slice(0, input_width) - self.input_indices = np.arange(self.total_window_size)[self.input_slice] - - def make_dataset(self, data, labels=None): - data = np.array(data, dtype=np.float32) - if labels is not None: - labels = np.array(labels, dtype=np.float32) - ds = tf.keras.preprocessing.timeseries_dataset_from_array( - data=data, - targets=labels, - sequence_length=self.total_window_size, - sequence_stride=1, - sampling_rate=1, - shuffle=False, - batch_size=self.batch_size, - ) - - return ds - - @property - def train(self): - - - - return self.make_dataset(self.train_df, self.train_labels) - - @property - def val(self): - return self.make_dataset(self.val_df, self.val_labels) - - @property - def test(self): - return self.make_dataset(self.test_df, self.test_labels) - - @property - def inference(self): - return self.make_dataset(self.test_df) - - @property - def example(self): - """Get and cache an example batch of `inputs, labels` for plotting.""" - result = getattr(self, "_example", None) - if result is None: - # No example batch was found, so get one from the `.train` dataset - result = next(iter(self.train)) - # And cache it for next time - self._example = result - return result \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py deleted file mode 100644 index 6ced4749e..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ /dev/null @@ -1,273 +0,0 @@ -import logging -from typing import Any, Dict, Tuple - -import numpy as np -import numpy.typing as npt -import pandas as pd -import torch as th -from pandas import DataFrame -from stable_baselines3 import PPO -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor -from stable_baselines3.common.vec_env import SubprocVecEnv - -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_TDQN import TDQN -from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_5ac import DEnv -#from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_3ac import DEnv -from freqtrade.persistence import Trade - -logger = logging.getLogger(__name__) - -class ReinforcementLearning(IFreqaiModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen - ) -> Any: - """ - Filter the training data and train a model to it. Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. - :param unfiltered_dataframe: Full dataframe for the current training period - :param metadata: pair metadata from strategy. 
- :returns: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info("--------------------Starting training " f"{pair} --------------------") - - # filter the features requested by user in the configuration file and elegantly handle NaNs - features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( - features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods - - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" - ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - - model = self.fit(data_dictionary, pair) - - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - - logger.info(f"--------------------done training {pair}--------------------") - - return model - - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - - # train_df = data_dictionary["train_features"] - # # train_labels = data_dictionary["train_labels"] - # test_df = data_dictionary["test_features"] - # # test_labels = data_dictionary["test_labels"] - # # sep = '/' - # # coin = pair.split(sep, 1)[0] - # # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - # # price.reset_index(inplace=True, drop=True) - # # price = price.to_frame() - # price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - # #train_env = GymAnytrading(train_df, price, self.CONV_WIDTH) - # agent_params = self.freqai_info['model_training_parameters'] - # reward_params = self.freqai_info['model_reward_parameters'] - # train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - # #eval_env = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - # #env_instance = SubprocVecEnv([DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params)]) - # #train_env.reset() - # #eval_env.reset() - # # model - # #policy_kwargs = dict(net_arch=[512, 512, 512]) - # policy_kwargs = dict(activation_fn=th.nn.Tanh, - # net_arch=[256, 256, 256]) - # agent = RLPrediction_agent(train_env) - # #eval_agent = RLPrediction_agent(eval_env) - - # # PPO - # model_name = 'ppo' - # model = agent.get_model(model_name, model_kwargs=agent_params, policy_kwargs=policy_kwargs) - # trained_model = agent.train_model(model=model, - # tb_log_name=model_name, - # model_kwargs=agent_params, - # train_df=train_df, - # test_df=test_df, - # price=price, - # price_test=price_test, - # window_size=self.CONV_WIDTH) - # # best_model = eval_agent.train_model(model=model, - # # tb_log_name=model_name, - # # model_kwargs=agent_params, - # # eval=eval_env) - # # TDQN - # # model_name = 'TDQN' - # # model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log='./tensorboard_log/', - # # learning_rate=agent_params["learning_rate"], gamma=0.9, - # # 
target_update_interval=5000, buffer_size=50000, - # # exploration_initial_eps=1, exploration_final_eps=0.1, - # # replay_buffer_class=ReplayBuffer - # # ) - # # trained_model = agent.train_model(model=model, - # # tb_log_name=model_name, - # # model_kwargs=agent_params) - # #model.learn( - # # total_timesteps=5000, - # # callback=callback - # # ) - - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) - - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - - # environments - train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval_env = Monitor(eval, ".") - eval_env.reset() - - # this should be in config - TODO - agent_type = 'tdqn' - - path = self.dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/{agent_type}/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) - - if agent_type == 'tdqn': - model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", - learning_rate=0.00025, gamma=0.9, - target_update_interval=5000, buffer_size=50000, - exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=ReplayBuffer - ) - elif agent_type == 'ppo': - model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", - learning_rate=0.00025, gamma=0.9 - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - print('Training finished!') - - return model - - - - def get_state_info(self, pair): - open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) - market_side = 0.5 - current_profit = 0 - for trade in open_trades: - if trade.pair == pair: - current_value = trade.open_trade_value - openrate = trade.open_rate - if 'long' in trade.enter_tag: - market_side = 1 - else: - market_side = 0 - current_profit = current_value / openrate -1 - - total_profit = 0 - closed_trades = Trade.get_trades(trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) - for trade in closed_trades: - total_profit += trade.close_profit - - return market_side, current_profit, total_profit - - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False - ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :pred_df: dataframe containing the predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) - pred_df.fillna(0, inplace=True) - - return (pred_df, dk.do_predict) - - def rl_model_predict(self, dataframe: DataFrame, - dk: FreqaiDataKitchen, model: Any) -> DataFrame: - - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) - - def _predict(window): - observations = dataframe.iloc[window.index] - res, _ = model.predict(observations, deterministic=True) - return res - - output = output.rolling(window=self.CONV_WIDTH).apply(_predict) - - return output - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py new file mode 100644 index 000000000..2fa87c432 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -0,0 +1,155 @@ +import logging +from typing import Any, Dict # , Tuple + +import numpy as np +# import numpy.typing as npt +# import pandas as pd +import torch as th +# from pandas import DataFrame +from stable_baselines3 import PPO +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +# from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningPPO(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + # environments + train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + reward_kwargs=reward_params) + eval = MyRLEnv(df=test_df, prices=price_test, + window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval_env = Monitor(eval, ".") + eval_env.reset() + + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9 + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + print('Training finished!') + + return model + + +class MyRLEnv(BaseRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. 
+ + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py new file mode 100644 index 000000000..a022a10ba --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -0,0 +1,168 @@ +import logging +from typing import Any, Dict, Optional + +import numpy as np +import torch as th +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +# from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.RL.TDQNagent import TDQN +from stable_baselines3.common.buffers import ReplayBuffer + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningPPO(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + # environments + train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + reward_kwargs=reward_params) + eval = MyRLEnv(df=test_df, prices=price_test, + window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval_env = Monitor(eval, ".") + eval_env.reset() + + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = TDQN('TMultiInputPolicy', train_env, + policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/tdqn/tensorboard/", + learning_rate=0.00025, gamma=0.9, + target_update_interval=5000, buffer_size=50000, + exploration_initial_eps=1, exploration_final_eps=0.1, + replay_buffer_class=Optional(ReplayBuffer) + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + print('Training finished!') + + return model + + +class MyRLEnv(BaseRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def 
step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. 
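Both new prediction models size their stable-baselines3 runs from the FreqAI configuration rather than hard-coding timesteps: the fit() methods above multiply train_cycles and eval_cycles by the lengths of the train and test feature frames, and the reward target comes from model_reward_parameters. A hedged sketch of that arithmetic, where only the key names are taken from the code above and the concrete numbers are assumptions for the example:

    # Illustrative sketch only -- how fit() above derives its training budget.
    # The dictionary keys mirror the config sections the models read; the
    # cycle counts and frame sizes are made up for illustration.
    model_training_parameters = {"train_cycles": 10, "eval_cycles": 3}
    model_reward_parameters = {"rr": 1, "profit_aim": 0.02}

    len_train_df = 5_000   # rows in data_dictionary["train_features"]
    len_test_df = 1_000    # rows in data_dictionary["test_features"]

    total_timesteps = model_training_parameters["train_cycles"] * len_train_df   # 50_000
    eval_freq = model_training_parameters["eval_cycles"] * len_test_df           # 3_000

    # calculate_reward() doubles the close reward once the recorded trade profit
    # exceeds this target:
    profit_target = model_reward_parameters["profit_aim"] * model_reward_parameters["rr"]  # 0.02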
diff --git a/freqtrade/freqai/rl/BaseRLAgent.py b/freqtrade/freqai/rl/BaseRLAgent.py deleted file mode 100644 index e69de29bb..000000000 From d4db5c32812e65d93fde173cd61e57d1a8a035f1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 10:29:33 +0200 Subject: [PATCH 012/421] ensure TDQN class is properly named --- freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index a022a10ba..f042762e4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -15,7 +15,7 @@ from stable_baselines3.common.buffers import ReplayBuffer logger = logging.getLogger(__name__) -class ReinforcementLearningPPO(BaseReinforcementLearningModel): +class ReinforcementLearningTDQN(BaseReinforcementLearningModel): """ User created Reinforcement Learning Model prediction model. """ From 6048f60f13195a9db5488c06436d49921156d87d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 11:11:16 +0200 Subject: [PATCH 013/421] get TDQN working with 5 action environment --- .../ReinforcementLearningTDQN.py | 201 +++++++++++++++--- 1 file changed, 168 insertions(+), 33 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index f042762e4..5ec917719 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -1,16 +1,17 @@ import logging -from typing import Any, Dict, Optional - +from typing import Any, Dict # Optional +from enum import Enum import numpy as np import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor # from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer - +from gym import spaces +from gym.utils import seeding logger = logging.getLogger(__name__) @@ -57,7 +58,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): learning_rate=0.00025, gamma=0.9, target_update_interval=5000, buffer_size=50000, exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=Optional(ReplayBuffer) + replay_buffer_class=ReplayBuffer ) model.learn( @@ -70,11 +71,102 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): return model +class Actions(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + class MyRLEnv(BaseRLEnv): """ - User can override any function in BaseRLEnv and gym.Env + User can override any function in BaseRLEnv and gym.Env. Here the user + Adds 5 actions. 
""" + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + def step(self, action): self._done = False self._current_tick += 1 @@ -85,11 +177,12 @@ class MyRLEnv(BaseRLEnv): self.update_portfolio_log_returns(action) self._update_profit(action) - step_reward = self._calculate_reward(action) + step_reward = self.calculate_reward(action) self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position """ Action: Neutral, position: Long -> Close Long Action: Neutral, position: Short -> Close Short @@ -104,12 +197,18 @@ class MyRLEnv(BaseRLEnv): if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long.value: + elif action == Actions.Long_buy.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short.value: + elif action == Actions.Short_buy.value: self._position = Positions.Short trade_type = "short" + elif action == Actions.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" else: print("case not defined") @@ -136,33 +235,69 @@ class MyRLEnv(BaseRLEnv): return observation, step_reward, self._done, info - def calculate_reward(self, action): + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): if self._last_trade_tick is None: return 0. 
- # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or - return 0. 
+ (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) + + def _is_trade(self, action): + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) From 9c78e6c26f39e903c1bc899d1b3bbc8cc9f4e09a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 11:24:57 +0200 Subject: [PATCH 014/421] base PPO model only customizes reward for 3AC --- .../ReinforcementLearningPPO.py | 63 +------------------ 1 file changed, 1 insertion(+), 62 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 2fa87c432..4d995c4e3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -28,7 +28,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) + eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) # price data for model training and evaluation @@ -72,67 +72,6 @@ class MyRLEnv(BaseRLEnv): User can override any function in BaseRLEnv and gym.Env """ - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - 
self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - def calculate_reward(self, action): if self._last_trade_tick is None: From 718c9d044010470fc8be207608c4ad2ebf29fd1c Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 12:29:44 +0300 Subject: [PATCH 015/421] action fix --- .../ReinforcementLearningTDQN.py | 69 +++++++------------ 1 file changed, 26 insertions(+), 43 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 5ec917719..8f5fe4e03 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -72,11 +72,9 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): class Actions(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 + Short = 0 + Long = 1 + Neutral = 2 class Positions(Enum): @@ -181,36 +179,31 @@ class MyRLEnv(BaseRLEnv): self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade + if self.is_tradesignal(action): # exclude 3 case not trade # Update position """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short + + Action: Short, position: Neutral -> Open Short Action: Short, position: Long -> Close Long and Open Short """ - + + temp_position = self._position if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long_buy.value: + elif action == Actions.Long.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short_buy.value: + elif action == Actions.Short.value: self._position = Positions.Short trade_type = "short" - elif action == Actions.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" else: - print("case not defined") + print("case not define") # Update last trade tick self._last_trade_tick = self._current_tick @@ -257,33 +250,23 @@ class MyRLEnv(BaseRLEnv): return 0. 
def is_tradesignal(self, action): - # trade signal + # trade signal """ not trade signal is : - Action: Neutral, position: Neutral -> Nothing + Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) - - def _is_trade(self, action): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) def is_hold(self, action): return ((action == Actions.Short.value and self._position == Positions.Short) From 096533bcb9e3f2c3685986c00ffbecd2eddb0f18 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 12:45:08 +0300 Subject: [PATCH 016/421] 3ac to 5ac --- .../ReinforcementLearningTDQN.py | 69 ++++++++++++------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 8f5fe4e03..5ec917719 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -72,9 +72,11 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 class Positions(Enum): @@ -179,31 +181,36 @@ class MyRLEnv(BaseRLEnv): self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade + if self.is_tradesignal(action): # exclude 3 case not trade # Update position """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, 
position: Neutral -> Open Long Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short + + Action: Short, position: Neutral -> Open Short Action: Short, position: Long -> Close Long and Open Short """ - - temp_position = self._position + if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long.value: + elif action == Actions.Long_buy.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short.value: + elif action == Actions.Short_buy.value: self._position = Positions.Short trade_type = "short" + elif action == Actions.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" else: - print("case not define") + print("case not defined") # Update last trade tick self._last_trade_tick = self._current_tick @@ -250,23 +257,33 @@ class MyRLEnv(BaseRLEnv): return 0. def is_tradesignal(self, action): - # trade signal + # trade signal """ not trade signal is : - Action: Neutral, position: Neutral -> Nothing + Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) + + def _is_trade(self, action): + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) + ) def is_hold(self, action): return ((action == Actions.Short.value and self._position == Positions.Short) From 926023935f52a69d7e8830843729978121b68bc3 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 12:13:37 +0200 Subject: [PATCH 017/421] make base 3ac and base 5ac environments. TDQN defaults to 3AC. 
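The 3-action environment encodes Neutral/Long/Short directly, while the 5-action environment splits entries and exits into Neutral, Long_buy, Long_sell, Short_buy and Short_sell. A user who wants the 5-action variant can subclass Base5ActionRLEnv and override calculate_reward; a minimal sketch of that, mirroring the commented-out example left in the TDQN file below (the class name is illustrative only):

import numpy as np

from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyFiveActionEnv(Base5ActionRLEnv):
    # Sketch: reward the fee-adjusted log return only when a position is explicitly closed.
    def calculate_reward(self, action):
        if self._last_trade_tick is None:
            return 0.
        # close long
        if action == Actions.Long_sell.value and self._position == Positions.Long:
            last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open)
            current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open)
            return float(np.log(current_price) - np.log(last_trade_price))
        # close short
        if action == Actions.Short_buy.value and self._position == Positions.Short:
            last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open)
            current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open)
            return float(np.log(last_trade_price) - np.log(current_price))
        return 0.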
--- .../RL/{BaseRLEnv.py => Base3ActionRLEnv.py} | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 364 ++++++++++++++++++ .../RL/BaseReinforcementLearningModel.py | 4 +- .../ReinforcementLearningPPO.py | 4 +- .../ReinforcementLearningTDQN.py | 266 +++---------- 5 files changed, 417 insertions(+), 223 deletions(-) rename freqtrade/freqai/RL/{BaseRLEnv.py => Base3ActionRLEnv.py} (99%) create mode 100644 freqtrade/freqai/RL/Base5ActionRLEnv.py diff --git a/freqtrade/freqai/RL/BaseRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py similarity index 99% rename from freqtrade/freqai/RL/BaseRLEnv.py rename to freqtrade/freqai/RL/Base3ActionRLEnv.py index 607262acd..443ce7025 100644 --- a/freqtrade/freqai/RL/BaseRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -31,7 +31,7 @@ def mean_over_std(x): return mean / std if std > 0 else 0 -class BaseRLEnv(gym.Env): +class Base3ActionRLEnv(gym.Env): metadata = {'render.modes': ['human']} diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py new file mode 100644 index 000000000..01fb77481 --- /dev/null +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -0,0 +1,364 @@ +import logging +from enum import Enum +# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + +import gym +import numpy as np +from gym import spaces +from gym.utils import seeding + +logger = logging.getLogger(__name__) + + +class Actions(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + + +class Base5ActionRLEnv(gym.Env): + """ + Base class for a 5 action environment + """ + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. 
# unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self.calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long_buy.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short_buy.value: + self._position = Positions.Short + trade_type = "short" + elif action == Actions.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + # def processState(self, state): + # return state.to_numpy() + + # def convert_mlp_Policy(self, obs_): + # pass + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. 
+ + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or + + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) + + def _is_trade(self, action: Actions): + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + def _update_profit(self, action): + # if self._is_trade(action) or self._done: + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + # if action == Actions.Short.value or action == Actions.Neutral.value: + if action == Actions.Short_buy.value or action == Actions.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + # if action == Actions.Long.value or action == Actions.Neutral.value: + if action == Actions.Long_buy.value or action == Actions.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def get_trading_log_return(self): + return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + def get_bnh_log_return(self): + return np.diff(np.log(self.prices['open'][self._start_tick:])) + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. 
+ + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index accddc94d..a28b88c42 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -8,7 +8,7 @@ from pandas import DataFrame from abc import abstractmethod from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade logger = logging.getLogger(__name__) @@ -165,7 +165,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): hist_preds_df[return_str] = 0 -class MyRLEnv(BaseRLEnv): +class MyRLEnv(Base3ActionRLEnv): def step(self, action): self._done = False diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 4d995c4e3..cc56852df 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -10,7 +10,7 @@ from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor # from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel @@ -67,7 +67,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): return model -class MyRLEnv(BaseRLEnv): +class MyRLEnv(Base3ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 5ec917719..2a8570d3e 100644 --- 
a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -1,17 +1,14 @@ import logging from typing import Any, Dict # Optional -from enum import Enum -import numpy as np import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor # from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer -from gym import spaces -from gym.utils import seeding +import numpy as np logger = logging.getLogger(__name__) @@ -71,233 +68,66 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): return model -class Actions(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - - -class MyRLEnv(BaseRLEnv): +class MyRLEnv(Base3ActionRLEnv): """ - User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. + User can override any function in BaseRLEnv and gym.Env """ - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee = 0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. 
- - self.returns_report = [] - - return self._get_observation() - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self.calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long_buy.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short_buy.value: - self._position = Positions.Short - trade_type = "short" - elif action == Actions.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - def get_unrealized_profit(self): + def calculate_reward(self, action): if self._last_trade_tick is None: return 0. - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. 
+ current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) + return 0. - def _is_trade(self, action): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or +# User can inherit and customize 5 action environment +# class MyRLEnv(Base5ActionRLEnv): +# """ +# User can override any function in BaseRLEnv and gym.Env. Here the user +# Adds 5 actions. +# """ - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) +# def calculate_reward(self, action): - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) +# if self._last_trade_tick is None: +# return 0. 
- def add_buy_fee(self, price): - return price * (1 + self.fee) +# # close long +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(current_price) - np.log(last_trade_price)) - def add_sell_fee(self, price): - return price / (1 + self.fee) +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(current_price) - np.log(last_trade_price)) * 2) - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} +# # close short +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(last_trade_price) - np.log(current_price)) - for key, value in info.items(): - self.history[key].append(value) +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(last_trade_price) - np.log(current_price)) * 2) + +# return 0. From 13cd18dc9a84e36fbc11ed2d17a2a65ebdaecdb5 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:05:01 +0300 Subject: [PATCH 018/421] PPO policy change + verbose=1 --- .../freqai/prediction_models/ReinforcementLearningPPO.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index cc56852df..5bc33bff1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -53,8 +53,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128]) - model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9 + model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 ) model.learn( From 1c81ec601683205460ccf1cd86b2522249e0d255 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:20:57 +0300 Subject: [PATCH 019/421] 3ac and 5ac example strategies --- ....py => ReinforcementLearningExample3ac.py} | 2 +- .../ReinforcementLearningExample5ac.py | 147 ++++++++++++++++++ 2 files changed, 148 insertions(+), 1 deletion(-) rename freqtrade/freqai/example_strats/{ReinforcementLearningExample.py => ReinforcementLearningExample3ac.py} (99%) create mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py similarity index 99% rename 
from freqtrade/freqai/example_strats/ReinforcementLearningExample.py rename to freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 1bafdbb80..8473fc6a9 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class ReinforcementLearningExample(IStrategy): +class RLExample3ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py new file mode 100644 index 000000000..1da9a8ab1 --- /dev/null +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -0,0 +1,147 @@ +import logging +from functools import reduce + +import pandas as pd +import talib.abstract as ta +from pandas import DataFrame + +from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair + + +logger = logging.getLogger(__name__) + + +class RLExample5ac(IStrategy): + """ + Test strategy - used for testing freqAI functionalities. + DO not use in production. + """ + + minimal_roi = {"0": 0.1, "240": -1} + + plot_config = { + "main_plot": {}, + "subplots": { + "prediction": {"prediction": {"color": "blue"}}, + "target_roi": { + "target_roi": {"color": "brown"}, + }, + "do_predict": { + "do_predict": {"color": "brown"}, + }, + }, + } + + process_only_new_candles = True + stoploss = -0.05 + use_exit_signal = True + startup_candle_count: int = 300 + can_short = False + + linear_roi_offset = DecimalParameter( + 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True + ) + max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) + + def informative_pairs(self): + whitelist_pairs = self.dp.current_whitelist() + corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] + informative_pairs = [] + for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: + for pair in whitelist_pairs: + informative_pairs.append((pair, tf)) + for pair in corr_pairs: + if pair in whitelist_pairs: + continue # avoid duplication + informative_pairs.append((pair, tf)) + return informative_pairs + + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + with self.freqai.lock: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] + + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in 
range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 + + return df + + def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + + self.freqai_info = self.config["freqai"] + + dataframe = self.freqai.start(dataframe, metadata, self) + + return dataframe + + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df From cf0731095f91fdec5adc84087f10404baf009c60 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:23:00 +0300 Subject: [PATCH 020/421] type fix --- .../freqai/example_strats/ReinforcementLearningExample3ac.py | 2 +- .../freqai/example_strats/ReinforcementLearningExample5ac.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 8473fc6a9..2173f3d2f 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class RLExample3ac(IStrategy): +class ReinforcementLearningExample3ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. 
diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 1da9a8ab1..bf0d91390 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class RLExample5ac(IStrategy): +class ReinforcementLearningExample5ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. From acf3484e8857854d37deefd30fd631b3dbcf336c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 13:46:12 +0200 Subject: [PATCH 021/421] add multiprocessing variant of ReinforcementLearningPPO --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 6 +- .../ReinforcementLearningPPO_multiproc.py | 114 ++++++++++++++++++ 2 files changed, 118 insertions(+), 2 deletions(-) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 443ce7025..5e8bff024 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -35,10 +35,12 @@ class Base3ActionRLEnv(gym.Env): metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 - self.seed() + self.id = id + self.seed(seed) self.df = df self.signal_features = self.df self.prices = prices diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py new file mode 100644 index 000000000..1b2873334 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -0,0 +1,114 @@ +import logging +from typing import Any, Dict # , Tuple + +import numpy as np +# import numpy.typing as npt +# import pandas as pd +import torch as th +# from pandas import DataFrame +from typing import Callable +from stable_baselines3 import PPO +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.utils import set_random_seed +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +import gym +logger = logging.getLogger(__name__) + + +def make_env(env_id: str, rank: int, seed: int, train_df, price, + reward_params, window_size) -> Callable: + """ + Utility function for multiprocessed env. + + :param env_id: (str) the environment ID + :param num_env: (int) the number of environment you wish to have in subprocesses + :param seed: (int) the inital seed for RNG + :param rank: (int) index of the subprocess + :return: (Callable) + """ + def _init() -> gym.Env: + + env = MyRLEnv(df=train_df, prices=price, window_size=window_size, + reward_kwargs=reward_params, id=env_id, seed=seed + rank) + return env + set_random_seed(seed) + return _init + + +class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + env_id = "CartPole-v1" + num_cpu = 4 + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + self.CONV_WIDTH) for i in range(num_cpu)]) + + eval_env = SubprocVecEnv([make_env(env_id, i, 1, test_df, price_test, reward_params, + self.CONV_WIDTH) for i in range(num_cpu)]) + + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + print('Training finished!') + + return model + + +class MyRLEnv(Base3ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. 
From 6d8e838a8f1443c7f915cd6283850b134c36a7d9 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Mon, 15 Aug 2022 22:07:42 +0700 Subject: [PATCH 022/421] update tensorboard dependency --- requirements-freqai.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 8d8bb03c5..869606365 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -8,4 +8,5 @@ catboost==1.0.6; platform_machine != 'aarch64' lightgbm==3.3.2 torch==1.12.1 stable-baselines3==1.5.0 -gym==0.21.0 \ No newline at end of file +gym==0.21.0 +tensorboard==2.9.1 \ No newline at end of file From b1fc5a06ca0265203b53296069f25b1bffce1891 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:33:08 +0300 Subject: [PATCH 023/421] example config added --- .../config_reinforcementlearning_example.json | 110 ++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 config_examples/config_reinforcementlearning_example.json diff --git a/config_examples/config_reinforcementlearning_example.json b/config_examples/config_reinforcementlearning_example.json new file mode 100644 index 000000000..89b33653d --- /dev/null +++ b/config_examples/config_reinforcementlearning_example.json @@ -0,0 +1,110 @@ +{ + "trading_mode": "futures", + "new_pairs_days": 30, + "margin_mode": "isolated", + "max_open_trades": 8, + "stake_currency": "USDT", + "stake_amount": 1000, + "tradable_balance_ratio": 1, + "fiat_display_currency": "USD", + "dry_run": true, + "timeframe": "3m", + "dataformat_ohlcv": "json", + "dry_run_wallet": 12000, + "cancel_open_orders_on_exit": true, + "unfilledtimeout": { + "entry": 10, + "exit": 30 + }, + "exchange": { + "name": "binance", + "key": "", + "secret": "", + "ccxt_config": { + "enableRateLimit": true + }, + "ccxt_async_config": { + "enableRateLimit": true, + "rateLimit": 200 + }, + "pair_whitelist": [ + "1INCH/USDT", + "AAVE/USDT" + ], + "pair_blacklist": [] + }, + "entry_pricing": { + "price_side": "same", + "purge_old_models": true, + "use_order_book": true, + "order_book_top": 1, + "price_last_balance": 0.0, + "check_depth_of_market": { + "enabled": false, + "bids_to_ask_delta": 1 + } + }, + "exit_pricing": { + "price_side": "other", + "use_order_book": true, + "order_book_top": 1 + }, + "pairlists": [ + { + "method": "StaticPairList" + } + ], + "freqai": { + "model_save_type": "stable_baselines", + "conv_width": 10, + "follow_mode": false, + "purge_old_models": true, + "expiration_hours": 1, + "train_period_days": 10, + "backtest_period_days": 2, + "identifier": "test_rl9", + "feature_parameters": { + "include_corr_pairlist": [ + "BTC/USDT", + "ETH/USDT" + ], + "include_timeframes": [ + "3m", + "15m" + ], + "label_period_candles": 80, + "include_shifted_candles": 0, + "DI_threshold": 0, + "weight_factor": 0.9, + "principal_component_analysis": false, + "use_SVM_to_remove_outliers": false, + "svm_params": {"shuffle": true, "nu": 0.1}, + "stratify_training_data": 0, + "indicator_max_period_candles": 10, + "indicator_periods_candles": [5] + }, + "data_split_parameters": { + "test_size": 0.5, + "random_state": 1, + "shuffle": false + }, + "model_training_parameters": { + "n_steps": 2048, + "ent_coef": 0.005, + "learning_rate": 0.000025, + "batch_size": 256, + "eval_cycles" : 5, + "train_cycles" : 15 + }, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.01 + } + }, + "bot_name": "RL_test", + "force_entry_enable": true, + "initial_state": "running", + "internals": { + "process_throttle_secs": 5 + } +} \ No 
newline at end of file From 48bb51b458dfd02dff006c866e9b96ddf070ec06 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:41:24 +0300 Subject: [PATCH 024/421] example config added --- config_examples/config_reinforcementlearning_example.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/config_examples/config_reinforcementlearning_example.json b/config_examples/config_reinforcementlearning_example.json index 89b33653d..29f088ef3 100644 --- a/config_examples/config_reinforcementlearning_example.json +++ b/config_examples/config_reinforcementlearning_example.json @@ -8,7 +8,7 @@ "tradable_balance_ratio": 1, "fiat_display_currency": "USD", "dry_run": true, - "timeframe": "3m", + "timeframe": "5m", "dataformat_ohlcv": "json", "dry_run_wallet": 12000, "cancel_open_orders_on_exit": true, @@ -62,15 +62,15 @@ "expiration_hours": 1, "train_period_days": 10, "backtest_period_days": 2, - "identifier": "test_rl9", + "identifier": "test_rl10", "feature_parameters": { "include_corr_pairlist": [ "BTC/USDT", "ETH/USDT" ], "include_timeframes": [ - "3m", - "15m" + "15m", + "30m" ], "label_period_candles": 80, "include_shifted_candles": 0, From 57c488a6f172a713a8625352b07480299c24f91c Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 18:35:41 +0300 Subject: [PATCH 025/421] learning_rate + multicpu changes --- .../ReinforcementLearningPPO_multiproc.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 1b2873334..c00784d7a 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -50,19 +50,22 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) + learning_rate = agent_params["learning_rate"] # price data for model training and evaluation price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( len(test_df.index)) - env_id = "CartPole-v1" - num_cpu = 4 + env_id = "train_env" + train_num_cpu = 6 train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + self.CONV_WIDTH) for i in range(train_num_cpu)]) - eval_env = SubprocVecEnv([make_env(env_id, i, 1, test_df, price_test, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + eval_num_cpu = 6 + eval_env_id = 'eval_env' + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + self.CONV_WIDTH) for i in range(eval_num_cpu)]) path = self.dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -71,10 +74,10 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) + net_arch=[512, 512, 512]) model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=learning_rate, gamma=0.9, verbose=1 ) model.learn( @@ -83,6 
+86,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): ) print('Training finished!') + eval_env.close() return model From bf7ceba95857ab6880929c8387d32a84155d92fd Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:01:15 +0200 Subject: [PATCH 026/421] set cpu threads in config --- .gitignore | 1 + config_examples/config_freqai-rl.example.json | 110 ++++++++++++++++++ .../RL/BaseReinforcementLearningModel.py | 19 ++- freqtrade/freqai/data_drawer.py | 7 +- .../prediction_models/CatboostClassifier.py | 2 +- .../prediction_models/CatboostRegressor.py | 2 +- .../ReinforcementLearningPPO.py | 13 ++- .../ReinforcementLearningPPO_multiproc.py | 15 ++- .../ReinforcementLearningTDQN.py | 11 +- 9 files changed, 159 insertions(+), 21 deletions(-) create mode 100644 config_examples/config_freqai-rl.example.json diff --git a/.gitignore b/.gitignore index e400c01f5..2d2d526d9 100644 --- a/.gitignore +++ b/.gitignore @@ -113,3 +113,4 @@ target/ !config_examples/config_full.example.json !config_examples/config_kraken.example.json !config_examples/config_freqai.example.json +!config_examples/config_freqai-rl.example.json diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json new file mode 100644 index 000000000..826fe7187 --- /dev/null +++ b/config_examples/config_freqai-rl.example.json @@ -0,0 +1,110 @@ +{ + "trading_mode": "futures", + "new_pairs_days": 30, + "margin_mode": "isolated", + "max_open_trades": 8, + "stake_currency": "USDT", + "stake_amount": 1000, + "tradable_balance_ratio": 1, + "fiat_display_currency": "USD", + "dry_run": true, + "timeframe": "5m", + "dataformat_ohlcv": "json", + "dry_run_wallet": 12000, + "cancel_open_orders_on_exit": true, + "unfilledtimeout": { + "entry": 10, + "exit": 30 + }, + "exchange": { + "name": "binance", + "key": "", + "secret": "", + "ccxt_config": { + "enableRateLimit": true + }, + "ccxt_async_config": { + "enableRateLimit": true, + "rateLimit": 200 + }, + "pair_whitelist": [ + "1INCH/USDT", + "AAVE/USDT" + ], + "pair_blacklist": [] + }, + "entry_pricing": { + "price_side": "same", + "purge_old_models": true, + "use_order_book": true, + "order_book_top": 1, + "price_last_balance": 0.0, + "check_depth_of_market": { + "enabled": false, + "bids_to_ask_delta": 1 + } + }, + "exit_pricing": { + "price_side": "other", + "use_order_book": true, + "order_book_top": 1 + }, + "pairlists": [ + { + "method": "StaticPairList" + } + ], + "freqai": { + "model_save_type": "stable_baselines_ppo", + "conv_width": 10, + "follow_mode": false, + "purge_old_models": true, + "train_period_days": 10, + "backtest_period_days": 2, + "identifier": "unique-id", + "data_kitchen_thread_count": 4, + "feature_parameters": { + "include_corr_pairlist": [ + "BTC/USDT", + "ETH/USDT" + ], + "include_timeframes": [ + "5m", + "30m" + ], + "label_period_candles": 80, + "include_shifted_candles": 0, + "DI_threshold": 0, + "weight_factor": 0.9, + "principal_component_analysis": false, + "use_SVM_to_remove_outliers": false, + "svm_params": {"shuffle": true, "nu": 0.1}, + "stratify_training_data": 0, + "indicator_max_period_candles": 10, + "indicator_periods_candles": [5] + }, + "data_split_parameters": { + "test_size": 0.5, + "random_state": 1, + "shuffle": false + }, + "model_training_parameters": { + "n_steps": 2048, + "ent_coef": 0.005, + "learning_rate": 0.000025, + "batch_size": 256, + "eval_cycles" : 5, + "train_cycles" : 15 + }, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.01 + } + }, + 
"bot_name": "RL_test", + "force_entry_enable": true, + "initial_state": "running", + "internals": { + "process_throttle_secs": 5 + } +} \ No newline at end of file diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a28b88c42..8fa784f12 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -56,7 +56,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - model = self.fit(data_dictionary, pair) + model = self.fit_rl(data_dictionary, pair, dk) if pair not in self.dd.historic_predictions: self.set_initial_historic_predictions( @@ -69,7 +69,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model @abstractmethod - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): """ Agent customizations and abstract Reinforcement Learning customizations go in here. Abstract method, so this function must be overridden by @@ -164,6 +164,21 @@ class BaseReinforcementLearningModel(IFreqaiModel): for return_str in dk.data['extra_returns_per_train']: hist_preds_df[return_str] = 0 + # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. + # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor + # all the other existing fit() functions to include dk argument. For now we instantiate and + # leave it. + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: + """ + Most regressors use the same function names and arguments e.g. user + can drop in LGBMRegressor in place of CatBoostRegressor and all data + management will be properly handled by Freqai. + :param data_dictionary: Dict = the dictionary constructed by DataHandler to hold + all the training and test data/labels. + """ + + return + class MyRLEnv(Base3ActionRLEnv): diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index f9d56c4b4..68f688ed4 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -471,11 +471,12 @@ class FreqaiDataDrawer: elif model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines': + elif model_type == 'stable_baselines_ppo': from stable_baselines3.ppo.ppo import PPO + model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + elif model_type == 'stable_baselines_dqn': from stable_baselines3 import DQN - #model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") - model = DQN.load(dk.data_path / f"best_model.zip") + model = DQN.load(dk.data_path / f"{dk.model_filename}_model.zip") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/prediction_models/CatboostClassifier.py b/freqtrade/freqai/prediction_models/CatboostClassifier.py index b88b28b25..fad74d7a8 100644 --- a/freqtrade/freqai/prediction_models/CatboostClassifier.py +++ b/freqtrade/freqai/prediction_models/CatboostClassifier.py @@ -16,7 +16,7 @@ class CatboostClassifier(BaseClassifierModel): has its own DataHandler where data is held, saved, loaded, and managed. 
""" - def fit(self, data_dictionary: Dict) -> Any: + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: """ User sets up the training and test data to fit their desired model here :params: diff --git a/freqtrade/freqai/prediction_models/CatboostRegressor.py b/freqtrade/freqai/prediction_models/CatboostRegressor.py index d93569c91..018f55879 100644 --- a/freqtrade/freqai/prediction_models/CatboostRegressor.py +++ b/freqtrade/freqai/prediction_models/CatboostRegressor.py @@ -17,7 +17,7 @@ class CatboostRegressor(BaseRegressionModel): has its own DataHandler where data is held, saved, loaded, and managed. """ - def fit(self, data_dictionary: Dict) -> Any: + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: """ User sets up the training and test data to fit their desired model here :param data_dictionary: the dictionary constructed by DataHandler to hold diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 5bc33bff1..d1cd2293e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -9,9 +9,9 @@ import torch as th from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor -# from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen logger = logging.getLogger(__name__) @@ -22,7 +22,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -44,7 +44,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): eval_env = Monitor(eval, ".") eval_env.reset() - path = self.dk.data_path + path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -54,7 +54,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): net_arch=[256, 256, 128]) model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, + gamma=0.9, verbose=1 ) model.learn( @@ -62,9 +63,11 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): callback=eval_callback ) + best_model = PPO.load(dk.data_path / "best_model.zip") + print('Training finished!') - return model + return best_model class MyRLEnv(Base3ActionRLEnv): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index c00784d7a..743caf8c6 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -13,7 +13,9 @@ from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen import gym + logger = logging.getLogger(__name__) @@ -42,7 +44,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -58,16 +60,15 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): len(test_df.index)) env_id = "train_env" - train_num_cpu = 6 + num_cpu = int(dk.thread_count / 2) train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, self.CONV_WIDTH) for i in range(train_num_cpu)]) - eval_num_cpu = 6 eval_env_id = 'eval_env' eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, - self.CONV_WIDTH) for i in range(eval_num_cpu)]) + self.CONV_WIDTH) for i in range(num_cpu)]) - path = self.dk.data_path + path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -85,10 +86,12 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): callback=eval_callback ) + # TODO get callback working so the best model is saved. 
For now we save last model + # best_model = PPO.load(dk.data_path / "best_model.zip") print('Training finished!') eval_env.close() - return model + return model # best_model class MyRLEnv(Base3ActionRLEnv): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 2a8570d3e..8bc5f9152 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -7,9 +7,12 @@ from stable_baselines3.common.monitor import Monitor from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN +from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer import numpy as np +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen + logger = logging.getLogger(__name__) @@ -18,7 +21,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -40,7 +43,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): eval_env = Monitor(eval, ".") eval_env.reset() - path = self.dk.data_path + path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -63,9 +66,11 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): callback=eval_callback ) + best_model = DQN.load(dk.data_path / "best_model.zip") + print('Training finished!') - return model + return best_model class MyRLEnv(Base3ActionRLEnv): From e5df39e8913721847c0f6ab9754a71cde0cea38b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:08:20 +0200 Subject: [PATCH 027/421] ensuring best_model is placed in ram and saved to disk and loaded from disk --- .../prediction_models/ReinforcementLearningPPO_multiproc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 743caf8c6..8370500b9 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -62,7 +62,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(dk.thread_count / 2) train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, - self.CONV_WIDTH) for i in range(train_num_cpu)]) + self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, From 69d542d3e2a464ae6fbd039489d2187b20705c01 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:27:48 +0200 Subject: [PATCH 028/421] match config and strats to upstream freqai --- config_examples/config_freqai-rl.example.json | 1 + .../ReinforcementLearningExample3ac.py | 74 +++++++++---------- .../ReinforcementLearningExample5ac.py | 74 
+++++++++---------- 3 files changed, 73 insertions(+), 76 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 826fe7187..736f3e022 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -55,6 +55,7 @@ } ], "freqai": { + "enabled": true, "model_save_type": "stable_baselines_ppo", "conv_width": 10, "follow_mode": false, diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 2173f3d2f..1976620fb 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -62,57 +62,55 @@ class ReinforcementLearningExample3ac(IStrategy): coin = pair.split('/')[0] - with self.freqai.lock: - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - informative[f"%-{coin}pct-change"] = informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) - # Add generalized indicators here (because in live, it will call this - # 
function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py - df["&-action"] = 2 + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 return df def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - self.freqai_info = self.config["freqai"] - dataframe = self.freqai.start(dataframe, metadata, self) return dataframe diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index bf0d91390..8c19cc0fa 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -62,57 +62,55 @@ class ReinforcementLearningExample5ac(IStrategy): coin = pair.split('/')[0] - with self.freqai.lock: - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - informative[f"%-{coin}pct-change"] = informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - 
informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) - # Add generalized indicators here (because in live, it will call this - # function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py - df["&-action"] = 2 + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). 
Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 return df def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - self.freqai_info = self.config["freqai"] - dataframe = self.freqai.start(dataframe, metadata, self) return dataframe From dd382dd3702cfe7edf2848adc9f7958d08ac62dc Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:56:53 +0200 Subject: [PATCH 029/421] add monitor to eval env so that multiproc can save best_model --- .../ReinforcementLearningPPO_multiproc.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 8370500b9..e8f67cbb8 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -6,6 +6,7 @@ import numpy as np # import pandas as pd import torch as th # from pandas import DataFrame +from stable_baselines3.common.monitor import Monitor from typing import Callable from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback @@ -20,7 +21,7 @@ logger = logging.getLogger(__name__) def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size) -> Callable: + reward_params, window_size, monitor=False) -> Callable: """ Utility function for multiprocessed env. @@ -34,6 +35,8 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, env = MyRLEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params, id=env_id, seed=seed + rank) + if monitor: + env = Monitor(env, ".") return env set_random_seed(seed) return _init @@ -66,7 +69,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): eval_env_id = 'eval_env' eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -86,12 +89,11 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): callback=eval_callback ) - # TODO get callback working so the best model is saved. 
For now we save last model - # best_model = PPO.load(dk.data_path / "best_model.zip") + best_model = PPO.load(dk.data_path / "best_model.zip") print('Training finished!') eval_env.close() - return model # best_model + return best_model class MyRLEnv(Base3ActionRLEnv): From d60a166fbf8a63f7d0107a115d8e00cd190630d4 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 22:39:33 +0300 Subject: [PATCH 030/421] multiproc TDQN with xtra callbacks --- .../ReinforcementLearningTDQN_multiproc.py | 164 ++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py new file mode 100644 index 000000000..d05184d87 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -0,0 +1,164 @@ +import logging +from typing import Any, Dict # Optional +import torch as th +import numpy as np +import gym +from typing import Callable +from stable_baselines3.common.callbacks import EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold +from stable_baselines3.common.monitor import Monitor +from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.utils import set_random_seed +from stable_baselines3 import DQN +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.RL.TDQNagent import TDQN +from stable_baselines3.common.buffers import ReplayBuffer +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen + + +logger = logging.getLogger(__name__) + +def make_env(env_id: str, rank: int, seed: int, train_df, price, + reward_params, window_size, monitor=False) -> Callable: + """ + Utility function for multiprocessed env. + + :param env_id: (str) the environment ID + :param num_env: (int) the number of environment you wish to have in subprocesses + :param seed: (int) the inital seed for RNG + :param rank: (int) index of the subprocess + :return: (Callable) + """ + def _init() -> gym.Env: + + env = MyRLEnv(df=train_df, prices=price, window_size=window_size, + reward_kwargs=reward_params, id=env_id, seed=seed + rank) + if monitor: + env = Monitor(env, ".") + return env + set_random_seed(seed) + return _init + +class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + learning_rate = agent_params["learning_rate"] + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + env_id = "train_env" + num_cpu = int(dk.thread_count / 2) + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + self.CONV_WIDTH) for i in range(num_cpu)]) + + eval_env_id = 'eval_env' + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) + + path = dk.data_path + stop_train_callback = StopTrainingOnNoModelImprovement(max_no_improvement_evals=5, min_evals=10, verbose=2) + callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=2) + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), + deterministic=True, render=True, callback_after_eval=stop_train_callback, callback_on_new_best=callback_on_best, verbose=2) + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[512, 512, 512]) + + model = TDQN('TMultiInputPolicy', train_env, + policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/tdqn/tensorboard/", + learning_rate=learning_rate, gamma=0.9, + target_update_interval=5000, buffer_size=50000, + exploration_initial_eps=1, exploration_final_eps=0.1, + replay_buffer_class=ReplayBuffer + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + best_model = DQN.load(dk.data_path / "best_model.zip") + print('Training finished!') + eval_env.close() + + return best_model + + +class MyRLEnv(Base3ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. + +# User can inherit and customize 5 action environment +# class MyRLEnv(Base5ActionRLEnv): +# """ +# User can override any function in BaseRLEnv and gym.Env. Here the user +# Adds 5 actions. +# """ + +# def calculate_reward(self, action): + +# if self._last_trade_tick is None: +# return 0. 
+ +# # close long +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(current_price) - np.log(last_trade_price)) + +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(current_price) - np.log(last_trade_price)) * 2) + +# # close short +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(last_trade_price) - np.log(current_price)) + +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(last_trade_price) - np.log(current_price)) * 2) + +# return 0. From 0475b7cb1838f46bcbb31771eca0b3c3cb6ed940 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Tue, 16 Aug 2022 09:30:35 +0700 Subject: [PATCH 031/421] remove unuse code and fix coding conventions --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 7 ------ freqtrade/freqai/RL/Base5ActionRLEnv.py | 14 ----------- .../ReinforcementLearningExample3ac.py | 1 - .../ReinforcementLearningExample5ac.py | 1 - .../ReinforcementLearningPPO_multiproc.py | 5 +++- .../ReinforcementLearningTDQN_multiproc.py | 24 +++++++++++++++---- 6 files changed, 23 insertions(+), 29 deletions(-) diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 5e8bff024..bf7b2fc7b 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -71,10 +71,6 @@ class Base3ActionRLEnv(gym.Env): self.history = None self.trade_history = [] - self.r_t_change = 0. - - self.returns_report = [] - def seed(self, seed: int = 1): self.np_random, seed = seeding.np_random(seed) return [seed] @@ -101,9 +97,6 @@ class Base3ActionRLEnv(gym.Env): self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] - self.r_t_change = 0. - - self.returns_report = [] return self._get_observation() diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 01fb77481..00b031e54 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -73,11 +73,6 @@ class Base5ActionRLEnv(gym.Env): self.history = None self.trade_history = [] - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] @@ -104,9 +99,6 @@ class Base5ActionRLEnv(gym.Env): self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] - self.r_t_change = 0. 
- - self.returns_report = [] return self._get_observation() @@ -178,12 +170,6 @@ class Base5ActionRLEnv(gym.Env): return observation, step_reward, self._done, info - # def processState(self, state): - # return state.to_numpy() - - # def convert_mlp_Policy(self, obs_): - # pass - def _get_observation(self): return self.signal_features[(self._current_tick - self.window_size):self._current_tick] diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 1976620fb..be7a8973b 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -62,7 +62,6 @@ class ReinforcementLearningExample3ac(IStrategy): coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 8c19cc0fa..0ecea92a9 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -62,7 +62,6 @@ class ReinforcementLearningExample5ac(IStrategy): coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index e8f67cbb8..26099a9e3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -81,7 +81,10 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): net_arch=[512, 512, 512]) model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=learning_rate, gamma=0.9, verbose=1 + tensorboard_log=f"{path}/ppo/tensorboard/", + learning_rate=learning_rate, + gamma=0.9, + verbose=1 ) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index d05184d87..dd34c96c1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -4,7 +4,8 @@ import torch as th import numpy as np import gym from typing import Callable -from stable_baselines3.common.callbacks import EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold +from stable_baselines3.common.callbacks import ( + EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold) from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed @@ -18,6 +19,7 @@ from freqtrade.freqai.data_kitchen import FreqaiDataKitchen logger = logging.getLogger(__name__) + def make_env(env_id: str, rank: int, seed: int, train_df, price, reward_params, window_size, monitor=False) -> Callable: """ @@ -39,6 +41,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, set_random_seed(seed) return _init + class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): """ User created Reinforcement Learning Model prediction model. 
@@ -69,11 +72,22 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path - stop_train_callback = StopTrainingOnNoModelImprovement(max_no_improvement_evals=5, min_evals=10, verbose=2) + stop_train_callback = StopTrainingOnNoModelImprovement( + max_no_improvement_evals=5, + min_evals=10, + verbose=2 + ) callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=2) - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), - deterministic=True, render=True, callback_after_eval=stop_train_callback, callback_on_new_best=callback_on_best, verbose=2) + eval_callback = EvalCallback( + eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", + eval_freq=int(eval_freq), + deterministic=True, + render=True, + callback_after_eval=stop_train_callback, + callback_on_new_best=callback_on_best, + verbose=2 + ) # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 512]) From 16cec7dfbd51f34c479d842ca023c8cd34aa79a7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 16 Aug 2022 12:18:06 +0200 Subject: [PATCH 032/421] fix save/reload functionality for stablebaselines --- .../config_reinforcementlearning_example.json | 110 ------------------ freqtrade/freqai/data_drawer.py | 6 +- 2 files changed, 3 insertions(+), 113 deletions(-) delete mode 100644 config_examples/config_reinforcementlearning_example.json diff --git a/config_examples/config_reinforcementlearning_example.json b/config_examples/config_reinforcementlearning_example.json deleted file mode 100644 index 29f088ef3..000000000 --- a/config_examples/config_reinforcementlearning_example.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "trading_mode": "futures", - "new_pairs_days": 30, - "margin_mode": "isolated", - "max_open_trades": 8, - "stake_currency": "USDT", - "stake_amount": 1000, - "tradable_balance_ratio": 1, - "fiat_display_currency": "USD", - "dry_run": true, - "timeframe": "5m", - "dataformat_ohlcv": "json", - "dry_run_wallet": 12000, - "cancel_open_orders_on_exit": true, - "unfilledtimeout": { - "entry": 10, - "exit": 30 - }, - "exchange": { - "name": "binance", - "key": "", - "secret": "", - "ccxt_config": { - "enableRateLimit": true - }, - "ccxt_async_config": { - "enableRateLimit": true, - "rateLimit": 200 - }, - "pair_whitelist": [ - "1INCH/USDT", - "AAVE/USDT" - ], - "pair_blacklist": [] - }, - "entry_pricing": { - "price_side": "same", - "purge_old_models": true, - "use_order_book": true, - "order_book_top": 1, - "price_last_balance": 0.0, - "check_depth_of_market": { - "enabled": false, - "bids_to_ask_delta": 1 - } - }, - "exit_pricing": { - "price_side": "other", - "use_order_book": true, - "order_book_top": 1 - }, - "pairlists": [ - { - "method": "StaticPairList" - } - ], - "freqai": { - "model_save_type": "stable_baselines", - "conv_width": 10, - "follow_mode": false, - "purge_old_models": true, - "expiration_hours": 1, - "train_period_days": 10, - "backtest_period_days": 2, - "identifier": "test_rl10", - "feature_parameters": { - "include_corr_pairlist": [ - "BTC/USDT", - "ETH/USDT" - ], - "include_timeframes": [ - "15m", - "30m" - ], - "label_period_candles": 80, - "include_shifted_candles": 0, - "DI_threshold": 0, - "weight_factor": 0.9, - "principal_component_analysis": false, - "use_SVM_to_remove_outliers": false, - "svm_params": {"shuffle": true, "nu": 0.1}, - "stratify_training_data": 0, - 
"indicator_max_period_candles": 10, - "indicator_periods_candles": [5] - }, - "data_split_parameters": { - "test_size": 0.5, - "random_state": 1, - "shuffle": false - }, - "model_training_parameters": { - "n_steps": 2048, - "ent_coef": 0.005, - "learning_rate": 0.000025, - "batch_size": 256, - "eval_cycles" : 5, - "train_cycles" : 15 - }, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.01 - } - }, - "bot_name": "RL_test", - "force_entry_enable": true, - "initial_state": "running", - "internals": { - "process_throttle_secs": 5 - } -} \ No newline at end of file diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 68f688ed4..9603fb9ab 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -395,7 +395,7 @@ class FreqaiDataDrawer: dump(model, save_path / f"{dk.model_filename}_model.joblib") elif model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines': + elif 'stable_baselines' in model_type: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -473,10 +473,10 @@ class FreqaiDataDrawer: model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") elif model_type == 'stable_baselines_ppo': from stable_baselines3.ppo.ppo import PPO - model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + model = PPO.load(dk.data_path / f"{dk.model_filename}_model") elif model_type == 'stable_baselines_dqn': from stable_baselines3 import DQN - model = DQN.load(dk.data_path / f"{dk.model_filename}_model.zip") + model = DQN.load(dk.data_path / f"{dk.model_filename}_model") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") From 2080ff86ed77b1b4c430d9a9b2f9cbf7ffc08a8a Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Wed, 17 Aug 2022 08:36:10 +0300 Subject: [PATCH 033/421] 5ac base fixes in logic --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 129 +++++++++++++----------- 1 file changed, 68 insertions(+), 61 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 00b031e54..574e71857 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -26,23 +26,23 @@ class Positions(Enum): def opposite(self): return Positions.Short if self == Positions.Long else Positions.Long - def mean_over_std(x): std = np.std(x, ddof=1) mean = np.mean(x) return mean / std if std > 0 else 0 - class Base5ActionRLEnv(gym.Env): """ Base class for a 5 action environment """ metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 - self.seed() + self.id = id + self.seed(seed) self.df = df self.signal_features = self.df self.prices = prices @@ -73,7 +73,7 @@ class Base5ActionRLEnv(gym.Env): self.history = None self.trade_history = [] - def seed(self, seed=None): + def seed(self, seed: int = 1): self.np_random, seed = seeding.np_random(seed) return [seed] @@ -102,7 +102,7 @@ class Base5ActionRLEnv(gym.Env): return self._get_observation() - def step(self, action): + def step(self, action: int): self._done = False self._current_tick += 1 @@ -191,7 +191,7 @@ class Base5ActionRLEnv(gym.Env): else: return 0. 
- def is_tradesignal(self, action): + def is_tradesignal(self, action: int): # trade signal """ not trade signal is : @@ -200,29 +200,29 @@ class Base5ActionRLEnv(gym.Env): Action: Short, position: Short -> Hold Short """ return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or (action == Actions.Short_sell.value and self._position == Positions.Long) or - + (action == Actions.Short_sell.value and self._position == Positions.Neutral) or (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Neutral)) def _is_trade(self, action: Actions): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) + return ((action == Actions.Long_buy.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Neutral)) def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) + return ((action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Neutral)) def add_buy_fee(self, price): return price * (1 + self.fee) @@ -240,6 +240,52 @@ class Base5ActionRLEnv(gym.Env): def get_sharpe_ratio(self): return mean_over_std(self.get_portfolio_log_returns()) + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. 
+ + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + if len(self.close_trade_profit): + # aim x2 rw + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + # less than aim x1 rw + elif self.close_trade_profit[-1] < self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + # # less than RR SL x2 neg rw + # elif self.close_trade_profit[-1] < (self.profit_aim * -1): + # last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 + + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + if len(self.close_trade_profit): + # aim x2 rw + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + # less than aim x1 rw + elif self.close_trade_profit[-1] < self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + # # less than RR SL x2 neg rw + # elif self.close_trade_profit[-1] > self.profit_aim * self.rr: + # last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + # return float((np.log(last_trade_price) - np.log(current_price)) * 2) * -1 + return 0. + + def _update_profit(self, action): # if self._is_trade(action) or self._done: if self._is_trade(action) or self._done: @@ -255,7 +301,7 @@ class Base5ActionRLEnv(gym.Env): self._profits.append((self._current_tick, self._total_profit)) self.close_trade_profit.append(pnl) - def most_recent_return(self, action): + def most_recent_return(self, action: int): """ We support Long, Neutral and Short positions. 
Return is generated from rising prices in Long @@ -265,7 +311,6 @@ class Base5ActionRLEnv(gym.Env): # Long positions if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open - # if action == Actions.Short.value or action == Actions.Neutral.value: if action == Actions.Short_buy.value or action == Actions.Neutral.value: current_price = self.add_sell_fee(current_price) @@ -280,7 +325,6 @@ class Base5ActionRLEnv(gym.Env): # Short positions if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open - # if action == Actions.Long.value or action == Actions.Neutral.value: if action == Actions.Long_buy.value or action == Actions.Neutral.value: current_price = self.add_buy_fee(current_price) @@ -296,9 +340,6 @@ class Base5ActionRLEnv(gym.Env): def get_portfolio_log_returns(self): return self.portfolio_log_returns[1:self._current_tick + 1] - def get_trading_log_return(self): - return self.portfolio_log_returns[self._start_tick:] - def update_portfolio_log_returns(self, action): self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) @@ -314,37 +355,3 @@ class Base5ActionRLEnv(gym.Env): returns = np.array(self.close_trade_profit) reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) return reward - - def get_bnh_log_return(self): - return np.diff(np.log(self.prices['open'][self._start_tick:])) - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. 
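
The reward rework in the patch above leaves calculate_reward() as the single hook a user environment overrides; the step, observation, and trade-bookkeeping logic stays in Base5ActionRLEnv. Below is a minimal sketch of that pattern — the subclass name MyScaledRewardEnv and the x2 bonus applied once close_trade_profit[-1] exceeds profit_aim * rr are illustrative assumptions, not code from any commit in this series; only the attributes and helpers it calls (prices, add_buy_fee/add_sell_fee, _last_trade_tick, _current_tick, _position, close_trade_profit, profit_aim, rr) come from the environment defined above.

import numpy as np

from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyScaledRewardEnv(Base5ActionRLEnv):
    """
    Illustrative subclass (assumed name): only the reward is customized,
    everything else (actions, observations, trade bookkeeping) is inherited
    from Base5ActionRLEnv.
    """

    def calculate_reward(self, action):
        # no reward can be computed before the first trade is opened
        if self._last_trade_tick is None:
            return 0.

        # closing a long: log return of the trade, doubled once the
        # profit target (profit_aim * rr) has been reached (assumed scaling)
        if action == Actions.Long_sell.value and self._position == Positions.Long:
            last_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open)
            current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open)
            factor = 2. if (self.close_trade_profit and
                            self.close_trade_profit[-1] > self.profit_aim * self.rr) else 1.
            return float((np.log(current_price) - np.log(last_price)) * factor)

        # closing a short: mirrored log return with the same scaling
        if action == Actions.Short_buy.value and self._position == Positions.Short:
            last_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open)
            current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open)
            factor = 2. if (self.close_trade_profit and
                            self.close_trade_profit[-1] > self.profit_aim * self.rr) else 1.
            return float((np.log(last_price) - np.log(current_price)) * factor)

        # any other action/position combination is not rewarded here
        return 0.

A prediction model would then construct this subclass in place of the MyRLEnv classes that the ReinforcementLearningPPO/TDQN models in the later patches pass to their training and evaluation environments.
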
From b90da46b1b0889bea477e65edf58d1375d2a352f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 17 Aug 2022 12:51:14 +0200 Subject: [PATCH 034/421] improve price df handling to enable backtesting --- config_examples/config_freqai-rl.example.json | 7 +--- .../RL/BaseReinforcementLearningModel.py | 39 +++++++++++++++++-- .../ReinforcementLearningExample3ac.py | 15 ++++--- .../ReinforcementLearningExample5ac.py | 12 +++--- .../ReinforcementLearningPPO.py | 18 +++------ .../ReinforcementLearningPPO_multiproc.py | 16 ++++---- .../ReinforcementLearningTDQN.py | 15 +++---- .../ReinforcementLearningTDQN_multiproc.py | 14 +++---- 8 files changed, 77 insertions(+), 59 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 736f3e022..565eeda00 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -73,16 +73,12 @@ "5m", "30m" ], - "label_period_candles": 80, "include_shifted_candles": 0, - "DI_threshold": 0, "weight_factor": 0.9, "principal_component_analysis": false, "use_SVM_to_remove_outliers": false, - "svm_params": {"shuffle": true, "nu": 0.1}, - "stratify_training_data": 0, "indicator_max_period_candles": 10, - "indicator_periods_candles": [5] + "indicator_periods_candles": [5, 10] }, "data_split_parameters": { "test_size": 0.5, @@ -90,7 +86,6 @@ "shuffle": false }, "model_training_parameters": { - "n_steps": 2048, "ent_coef": 0.005, "learning_rate": 0.000025, "batch_size": 256, diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 8fa784f12..78feea6d1 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -10,8 +10,11 @@ from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade - +import torch.multiprocessing +import torch as th logger = logging.getLogger(__name__) +th.set_num_threads(8) +torch.multiprocessing.set_sharing_strategy('file_system') class BaseReinforcementLearningModel(IFreqaiModel): @@ -46,6 +49,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk.fit_labels() # useless for now, but just satiating append methods # normalize all data based on train_dataset only + prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk) data_dictionary = dk.normalize_data(data_dictionary) # optional additional data cleaning/analysis @@ -56,7 +60,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - model = self.fit_rl(data_dictionary, pair, dk) + model = self.fit_rl(data_dictionary, pair, dk, prices_train, prices_test) if pair not in self.dd.historic_predictions: self.set_initial_historic_predictions( @@ -69,7 +73,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model @abstractmethod - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): """ Agent customizations and abstract Reinforcement Learning customizations go in here. 
Abstract method, so this function must be overridden by @@ -141,6 +146,34 @@ class BaseReinforcementLearningModel(IFreqaiModel): return output + def build_ohlc_price_dataframes(self, data_dictionary: dict, + pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame, + DataFrame]: + """ + Builds the train prices and test prices for the environment. + """ + + coin = pair.split('/')[0] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # price data for model training and evaluation + tf = self.config['timeframe'] + ohlc_list = [f'%-{coin}raw_open_{tf}', f'%-{coin}raw_low_{tf}', + f'%-{coin}raw_high_{tf}', f'%-{coin}raw_close_{tf}'] + rename_dict = {f'%-{coin}raw_open_{tf}': 'open', f'%-{coin}raw_low_{tf}': 'low', + f'%-{coin}raw_high_{tf}': ' high', f'%-{coin}raw_close_{tf}': 'close'} + + prices_train = train_df.filter(ohlc_list, axis=1) + prices_train.rename(columns=rename_dict, inplace=True) + prices_train.reset_index(drop=True) + + prices_test = test_df.filter(ohlc_list, axis=1) + prices_test.rename(columns=rename_dict, inplace=True) + prices_test.reset_index(drop=True) + + return prices_train, prices_test + def set_initial_historic_predictions( self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str ) -> None: diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index be7a8973b..ec0977455 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -36,7 +36,7 @@ class ReinforcementLearningExample3ac(IStrategy): stoploss = -0.05 use_exit_signal = True startup_candle_count: int = 300 - can_short = False + can_short = True linear_roi_offset = DecimalParameter( 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True @@ -76,8 +76,11 @@ class ReinforcementLearningExample3ac(IStrategy): informative[f"%-{coin}pct-change"] = informative["close"].pct_change() informative[f"%-{coin}raw_volume"] = informative["volume"] - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data @@ -101,9 +104,9 @@ class ReinforcementLearningExample3ac(IStrategy): df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py + # For RL, this is not a target, it is simply a filler until actions come out + # of the model. 
+ # for Base3ActionEnv, 2 is netural (hold) df["&-action"] = 2 return df diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 0ecea92a9..70727f6db 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -76,8 +76,11 @@ class ReinforcementLearningExample5ac(IStrategy): informative[f"%-{coin}pct-change"] = informative["close"].pct_change() informative[f"%-{coin}raw_volume"] = informative["volume"] - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data @@ -101,9 +104,8 @@ class ReinforcementLearningExample5ac(IStrategy): df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py + # For RL, there are no direct targets to set. This is filler (neutral) + # until the agent sends an action. df["&-action"] = 2 return df diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index d1cd2293e..b437ea8aa 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -3,9 +3,8 @@ from typing import Any, Dict # , Tuple import numpy as np # import numpy.typing as npt -# import pandas as pd import torch as th -# from pandas import DataFrame +from pandas import DataFrame from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor @@ -22,7 +21,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -31,18 +31,12 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - # environments - train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval = MyRLEnv(df=test_df, prices=price_test, + eval = MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) eval_env = Monitor(eval, ".") - eval_env.reset() path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -63,7 +57,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): callback=eval_callback ) - best_model = PPO.load(dk.data_path / "best_model.zip") + best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 26099a9e3..b1c5f316f 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -16,6 +16,7 @@ from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Posi from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.data_kitchen import FreqaiDataKitchen import gym +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -47,7 +48,8 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -57,18 +59,14 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): total_timesteps = agent_params["train_cycles"] * len(train_df) learning_rate = agent_params["learning_rate"] - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - env_id = "train_env" + th.set_num_threads(dk.thread_count) num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path @@ -92,7 +90,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): callback=eval_callback ) - best_model = PPO.load(dk.data_path / "best_model.zip") + best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') eval_env.close() diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 8bc5f9152..a60bc1fa1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -10,6 +10,7 @@ from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer import numpy as np +from pandas import DataFrame from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -21,7 +22,8 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -30,15 +32,10 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): eval_freq = agent_params["eval_cycles"] * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - # environments - train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval = MyRLEnv(df=test_df, prices=price_test, + eval = MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) eval_env = Monitor(eval, ".") eval_env.reset() @@ -66,7 +63,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): callback=eval_callback ) - best_model = DQN.load(dk.data_path / "best_model.zip") + best_model = DQN.load(dk.data_path / "best_model") print('Training finished!') diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index dd34c96c1..51e3c07c4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -15,7 +15,7 @@ from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcement from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer from freqtrade.freqai.data_kitchen import FreqaiDataKitchen - +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -47,7 +47,8 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -57,18 +58,13 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): total_timesteps = agent_params["train_cycles"] * len(train_df) learning_rate = agent_params["learning_rate"] - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - env_id = "train_env" num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path From 74e4fd0633ac288cb86eb6b847f0214a9f774dc6 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 17 Aug 2022 12:58:29 +0200 Subject: [PATCH 035/421] ensure config example can work with backtesting RL --- config_examples/config_freqai-rl.example.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 565eeda00..053c1a08e 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -8,7 +8,7 @@ "tradable_balance_ratio": 1, "fiat_display_currency": "USD", "dry_run": true, - "timeframe": "5m", + "timeframe": "3m", "dataformat_ohlcv": "json", "dry_run_wallet": 12000, "cancel_open_orders_on_exit": true, @@ -56,6 +56,7 @@ ], "freqai": { "enabled": true, + "startup_candles": 1000, "model_save_type": "stable_baselines_ppo", "conv_width": 10, "follow_mode": false, @@ -70,8 +71,8 @@ "ETH/USDT" ], "include_timeframes": [ - "5m", - "30m" + "3m", + "15m" ], "include_shifted_candles": 0, "weight_factor": 0.9, From d55092ff178600d6882bfe5b3149a48bc26856de Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Wed, 17 Aug 2022 17:31:27 +0200 Subject: [PATCH 036/421] Docker building update, and TDQN repair with the newer release of SB+ --- Dockerfile | 2 +- requirements-freqai.txt | 2 +- requirements-hyperopt.txt | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 14a67edc8..d06b53202 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,7 +33,7 @@ RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* ENV LD_LIBRARY_PATH /usr/local/lib # Install dependencies -COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt requirements-freqai.txt /freqtrade/ USER ftuser RUN pip install --user --no-cache-dir numpy \ && pip install --user --no-cache-dir -r requirements-hyperopt.txt diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 869606365..6000f8e0f 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -7,6 +7,6 @@ joblib==1.1.0 
catboost==1.0.6; platform_machine != 'aarch64' lightgbm==3.3.2 torch==1.12.1 -stable-baselines3==1.5.0 +stable-baselines3==1.6.0 gym==0.21.0 tensorboard==2.9.1 \ No newline at end of file diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index 020ccdda8..e19eb27c1 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -7,3 +7,4 @@ scikit-learn==1.1.2 scikit-optimize==0.9.0 filelock==3.8.0 progressbar2==4.0.0 +-r requirements-freqai.txt \ No newline at end of file From 45218faeb0c91f07c40e20071278bee2b865f084 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Thu, 18 Aug 2022 17:01:04 +0700 Subject: [PATCH 037/421] fix coding convention --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 46 +++++++++++++++++-------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 574e71857..5f817f14e 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -26,11 +26,13 @@ class Positions(Enum): def opposite(self): return Positions.Short if self == Positions.Long else Positions.Long + def mean_over_std(x): std = np.std(x, ddof=1) mean = np.mean(x) return mean / std if std > 0 else 0 + class Base5ActionRLEnv(gym.Env): """ Base class for a 5 action environment @@ -250,42 +252,58 @@ class Base5ActionRLEnv(gym.Env): if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee( + self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee( + self.prices.iloc[self._current_tick].open) return float((np.log(current_price) - np.log(last_trade_price)) * 2) # less than aim x1 rw elif self.close_trade_profit[-1] < self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee( + self.prices.iloc[self._last_trade_tick].open + ) + current_price = self.add_sell_fee( + self.prices.iloc[self._current_tick].open + ) return float(np.log(current_price) - np.log(last_trade_price)) # # less than RR SL x2 neg rw # elif self.close_trade_profit[-1] < (self.profit_aim * -1): - # last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + # last_trade_price = self.add_buy_fee( + # self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_sell_fee( + # self.prices.iloc[self._current_tick].open) # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 - # close short if action == Actions.Short_buy.value and self._position == Positions.Short: if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee( + self.prices.iloc[self._last_trade_tick].open + ) + current_price = self.add_buy_fee( + self.prices.iloc[self._current_tick].open + ) return float((np.log(last_trade_price) - np.log(current_price)) * 2) # less than aim x1 rw elif self.close_trade_profit[-1] < 
self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee( + self.prices.iloc[self._last_trade_tick].open + ) + current_price = self.add_buy_fee( + self.prices.iloc[self._current_tick].open + ) return float(np.log(last_trade_price) - np.log(current_price)) # # less than RR SL x2 neg rw # elif self.close_trade_profit[-1] > self.profit_aim * self.rr: - # last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + # last_trade_price = self.add_sell_fee( + # self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_buy_fee( + # self.prices.iloc[self._current_tick].open) # return float((np.log(last_trade_price) - np.log(current_price)) * 2) * -1 return 0. - def _update_profit(self, action): # if self._is_trade(action) or self._done: if self._is_trade(action) or self._done: From 81b5aa66e847453989f20418829bc4c52b5b6c4c Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Thu, 18 Aug 2022 17:37:26 +0700 Subject: [PATCH 038/421] make env keep current position when low profit --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 28 ++++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index bf7b2fc7b..9bb4cc39f 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -127,17 +127,25 @@ class Base3ActionRLEnv(gym.Env): Action: Short, position: Long -> Close Long and Open Short """ - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" + u_pnl = self.get_unrealized_profit() + # keep current position if upnl from -0.4% to 0.4% + if u_pnl <= 0.004 and u_pnl >= -0.004 and self._position != Positions.Neutral: + if action == Actions.Long.value and self._position == Positions.Short: + self._position = Positions.Short + elif action == Actions.Short.value and self._position == Positions.Long: + self._position = Positions.Long else: - print("case not defined") + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") # Update last trade tick self._last_trade_tick = self._current_tick From 7962a1439be4ead6d21089cfaa385380b88e2752 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Thu, 18 Aug 2022 17:53:52 +0700 Subject: [PATCH 039/421] remove keep low profit --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 28 +++++++++---------------- 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 9bb4cc39f..bf7b2fc7b 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -127,25 +127,17 @@ class Base3ActionRLEnv(gym.Env): Action: Short, position: Long -> Close Long and Open Short """ - u_pnl = self.get_unrealized_profit() - # keep current position if upnl from 
-0.4% to 0.4% - if u_pnl <= 0.004 and u_pnl >= -0.004 and self._position != Positions.Neutral: - if action == Actions.Long.value and self._position == Positions.Short: - self._position = Positions.Short - elif action == Actions.Short.value and self._position == Positions.Long: - self._position = Positions.Long + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" else: - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") + print("case not defined") # Update last trade tick self._last_trade_tick = self._current_tick From 5d4e5e69fe44aa9dedb9dcfdf43adfe240d9832b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 18 Aug 2022 13:02:47 +0200 Subject: [PATCH 040/421] reinforce training with state info, reinforce prediction with state info, restructure config to accommodate all parameters from any user imported model type. Set 5Act to default env on TDQN. Clean example config. --- config_examples/config_freqai-rl.example.json | 39 ++++----- freqtrade/freqai/RL/Base3ActionRLEnv.py | 4 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 17 +++- .../RL/BaseReinforcementLearningModel.py | 44 +++------- .../ReinforcementLearningPPO.py | 12 ++- .../ReinforcementLearningPPO_multiproc.py | 21 ++--- .../ReinforcementLearningTDQN.py | 83 ++++++------------ .../ReinforcementLearningTDQN_multiproc.py | 86 ++++++------------- 8 files changed, 114 insertions(+), 192 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 053c1a08e..1f12cbc6c 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -8,7 +8,7 @@ "tradable_balance_ratio": 1, "fiat_display_currency": "USD", "dry_run": true, - "timeframe": "3m", + "timeframe": "5m", "dataformat_ohlcv": "json", "dry_run_wallet": 12000, "cancel_open_orders_on_exit": true, @@ -35,7 +35,6 @@ }, "entry_pricing": { "price_side": "same", - "purge_old_models": true, "use_order_book": true, "order_book_top": 1, "price_last_balance": 0.0, @@ -56,10 +55,8 @@ ], "freqai": { "enabled": true, - "startup_candles": 1000, - "model_save_type": "stable_baselines_ppo", + "model_save_type": "stable_baselines_dqn", "conv_width": 10, - "follow_mode": false, "purge_old_models": true, "train_period_days": 10, "backtest_period_days": 2, @@ -71,13 +68,9 @@ "ETH/USDT" ], "include_timeframes": [ - "3m", - "15m" + "5m", + "30m" ], - "include_shifted_candles": 0, - "weight_factor": 0.9, - "principal_component_analysis": false, - "use_SVM_to_remove_outliers": false, "indicator_max_period_candles": 10, "indicator_periods_candles": [5, 10] }, @@ -86,16 +79,22 @@ "random_state": 1, "shuffle": false }, - "model_training_parameters": { - "ent_coef": 0.005, - "learning_rate": 0.000025, - "batch_size": 256, - "eval_cycles" : 5, - "train_cycles" : 15 + "model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "target_update_interval": 5000, + "buffer_size": 50000, + "exploration_initial_eps":1, + "exploration_final_eps": 0.1, + "verbose": 1 }, - 
"model_reward_parameters": { - "rr": 1, - "profit_aim": 0.01 + "rl_config": { + "train_cycles": 15, + "eval_cycles": 5, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02 + } } }, "bot_name": "RL_test", diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index bf7b2fc7b..9d17b982d 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -6,6 +6,7 @@ import gym import numpy as np from gym import spaces from gym.utils import seeding +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -35,7 +36,8 @@ class Base3ActionRLEnv(gym.Env): metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 5f817f14e..d7ceb5ff3 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -6,6 +6,7 @@ import gym import numpy as np from gym import spaces from gym.utils import seeding +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -39,7 +40,8 @@ class Base5ActionRLEnv(gym.Env): """ metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 @@ -56,7 +58,7 @@ class Base5ActionRLEnv(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1]) + self.shape = (window_size, self.signal_features.shape[1] + 2) self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) @@ -161,19 +163,26 @@ class Base5ActionRLEnv(gym.Env): self._done = True self._position_history.append(self._position) - observation = self._get_observation() + info = dict( tick=self._current_tick, total_reward=self.total_reward, total_profit=self._total_profit, position=self._position.value ) + + observation = self._get_observation() + self._update_history(info) return observation, step_reward, self._done, info def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + features_and_state = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + return features_and_state def get_unrealized_profit(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 78feea6d1..395b2a1a6 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -13,7 +13,7 @@ from freqtrade.persistence import Trade import torch.multiprocessing import torch as th logger = logging.getLogger(__name__) -th.set_num_threads(8) + torch.multiprocessing.set_sharing_strategy('file_system') @@ -22,6 +22,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): User created Reinforcement Learning 
Model prediction model. """ + def __init__(self, **kwargs): + super().__init__(config=kwargs['config']) + th.set_num_threads(self.freqai_info.get('data_kitchen_thread_count', 4)) + self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] + def train( self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen ) -> Any: @@ -62,12 +67,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): model = self.fit_rl(data_dictionary, pair, dk, prices_train, prices_test) - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - logger.info(f"--------------------done training {pair}--------------------") return model @@ -127,7 +126,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): # optional additional data cleaning/analysis self.data_cleaning_predict(dk, filtered_dataframe) - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df = self.rl_model_predict( + dk.data_dictionary["prediction_features"], dk, self.model) pred_df.fillna(0, inplace=True) return (pred_df, dk.do_predict) @@ -135,10 +135,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): def rl_model_predict(self, dataframe: DataFrame, dk: FreqaiDataKitchen, model: Any) -> DataFrame: - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): + market_side, current_profit, total_profit = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] + observations['current_profit'] = current_profit + observations['position'] = market_side res, _ = model.predict(observations, deterministic=True) return res @@ -174,29 +177,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return prices_train, prices_test - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 - # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor # all the other existing fit() functions to include dk argument. 
For now we instantiate and diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index b437ea8aa..5dc7735d3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -24,18 +24,16 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) # environments train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=reward_params) + reward_kwargs=self.reward_params) eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) eval_env = Monitor(eval, ".") path = dk.data_path @@ -49,7 +47,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, - gamma=0.9, verbose=1 + **self.freqai_info['model_training_parameters'] ) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index b1c5f316f..337e94607 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -51,23 +51,20 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) - learning_rate = agent_params["learning_rate"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) env_id = "train_env" - th.set_num_threads(dk.thread_count) num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, - self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for 
i in + range(num_cpu)]) path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -80,9 +77,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/ppo/tensorboard/", - learning_rate=learning_rate, - gamma=0.9, - verbose=1 + **self.freqai_info['model_training_parameters'] ) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index a60bc1fa1..3a57142cf 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -3,8 +3,7 @@ from typing import Any, Dict # Optional import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor -# from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3 import DQN @@ -25,18 +24,16 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) # environments train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=reward_params) + reward_kwargs=self.reward_params) eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) eval_env = Monitor(eval, ".") eval_env.reset() @@ -50,12 +47,10 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): net_arch=[256, 256, 128]) model = TDQN('TMultiInputPolicy', train_env, - policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/tdqn/tensorboard/", - learning_rate=0.00025, gamma=0.9, - target_update_interval=5000, buffer_size=50000, - exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=ReplayBuffer + policy_kwargs=policy_kwargs, + replay_buffer_class=ReplayBuffer, + **self.freqai_info['model_training_parameters'] ) model.learn( @@ -70,9 +65,11 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): return best_model -class MyRLEnv(Base3ActionRLEnv): +# User can inherit and customize 5 action environment +class MyRLEnv(Base5ActionRLEnv): """ - User can override any function in BaseRLEnv and gym.Env + User can override any function in BaseRLEnv and gym.Env. Here the user + Adds 5 actions. """ def calculate_reward(self, action): @@ -81,55 +78,27 @@ class MyRLEnv(Base3ActionRLEnv): return 0. 
# close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: + if action == Actions.Long_sell.value and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float(np.log(current_price) - np.log(last_trade_price)) + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: + if action == Actions.Short_buy.value and self._position == Positions.Short: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) return float(np.log(last_trade_price) - np.log(current_price)) + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + return 0. - -# User can inherit and customize 5 action environment -# class MyRLEnv(Base5ActionRLEnv): -# """ -# User can override any function in BaseRLEnv and gym.Env. Here the user -# Adds 5 actions. -# """ - -# def calculate_reward(self, action): - -# if self._last_trade_tick is None: -# return 0. - -# # close long -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(current_price) - np.log(last_trade_price)) - -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(current_price) - np.log(last_trade_price)) * 2) - -# # close short -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(last_trade_price) - np.log(current_price)) - -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(last_trade_price) - np.log(current_price)) * 2) - -# return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index 51e3c07c4..bf9e03b7f 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -10,7 +10,7 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed from stable_baselines3 import DQN -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer @@ -50,22 +50,20 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) - learning_rate = agent_params["learning_rate"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) env_id = "train_env" num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, - self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i in + range(num_cpu)]) path = dk.data_path stop_train_callback = StopTrainingOnNoModelImprovement( @@ -91,10 +89,8 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/tdqn/tensorboard/", - learning_rate=learning_rate, gamma=0.9, - target_update_interval=5000, buffer_size=50000, - exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=ReplayBuffer + replay_buffer_class=ReplayBuffer, + **self.freqai_info['model_training_parameters'] ) model.learn( @@ -109,9 +105,11 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): return best_model -class MyRLEnv(Base3ActionRLEnv): +# User can inherit and customize 5 action environment +class MyRLEnv(Base5ActionRLEnv): """ - User can override any function in BaseRLEnv and gym.Env + User can override any function in BaseRLEnv and gym.Env. Here the user + Adds 5 actions. """ def calculate_reward(self, action): @@ -120,55 +118,27 @@ class MyRLEnv(Base3ActionRLEnv): return 0. 
# close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: + if action == Actions.Long_sell.value and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float(np.log(current_price) - np.log(last_trade_price)) + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: + if action == Actions.Short_buy.value and self._position == Positions.Short: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) return float(np.log(last_trade_price) - np.log(current_price)) + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + return 0. - -# User can inherit and customize 5 action environment -# class MyRLEnv(Base5ActionRLEnv): -# """ -# User can override any function in BaseRLEnv and gym.Env. Here the user -# Adds 5 actions. -# """ - -# def calculate_reward(self, action): - -# if self._last_trade_tick is None: -# return 0. - -# # close long -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(current_price) - np.log(last_trade_price)) - -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(current_price) - np.log(last_trade_price)) * 2) - -# # close short -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(last_trade_price) - np.log(current_price)) - -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(last_trade_price) - np.log(current_price)) * 2) - -# return 0. From f95602f6bd3a9e9ef6d2e83921828e33be2d9b91 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 18 Aug 2022 16:07:19 +0200 Subject: [PATCH 041/421] persist a single training environment. 
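The commit below ("persist a single training environment") stops rebuilding the gym environments from scratch on every retraining window: the environment object is created once, and later windows swap in fresh data through a new `reset_env()` method before the usual `reset()`. A rough, self-contained sketch of that pattern (the `WindowEnv` class here is a hand-written stand-in, not the real Base5ActionRLEnv):

    class WindowEnv:
        # Minimal stand-in for Base5ActionRLEnv: holds data and supports in-place resets.
        def __init__(self, df, prices, window_size, reward_kwargs):
            self.reset_env(df, prices, window_size, reward_kwargs)

        def reset_env(self, df, prices, window_size, reward_kwargs):
            # Swap in new data without re-instantiating the object.
            self.df, self.prices = df, prices
            self.window_size, self.reward_kwargs = window_size, reward_kwargs

        def reset(self):
            # Rewind the episode pointer for the new training window.
            self._current_tick = self.window_size


    train_env = None

    def set_train_env(train_df, prices_train, window_size, reward_params):
        # Create once, then reuse: the shape of set_train_and_eval_environments() below.
        global train_env
        if train_env is None:
            train_env = WindowEnv(train_df, prices_train, window_size, reward_params)
        else:
            train_env.reset_env(train_df, prices_train, window_size, reward_params)
            train_env.reset()
        return train_env
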
--- config_examples/config_freqai-rl.example.json | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 12 +- .../RL/BaseReinforcementLearningModel.py | 117 +++++++++--------- .../ReinforcementLearningPPO_multiproc.py | 49 +++++--- .../ReinforcementLearningTDQN.py | 41 +++--- .../ReinforcementLearningTDQN_multiproc.py | 70 ++++++----- 6 files changed, 162 insertions(+), 129 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 1f12cbc6c..ccc977705 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -61,7 +61,7 @@ "train_period_days": 10, "backtest_period_days": 2, "identifier": "unique-id", - "data_kitchen_thread_count": 4, + "data_kitchen_thread_count": 2, "feature_parameters": { "include_corr_pairlist": [ "BTC/USDT", diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index d7ceb5ff3..bf3f0df33 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -7,7 +7,7 @@ import numpy as np from gym import spaces from gym.utils import seeding from pandas import DataFrame - +import pandas as pd logger = logging.getLogger(__name__) @@ -47,6 +47,9 @@ class Base5ActionRLEnv(gym.Env): self.id = id self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + + def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): self.df = df self.signal_features = self.df self.prices = prices @@ -178,10 +181,15 @@ class Base5ActionRLEnv(gym.Env): return observation, step_reward, self._done, info def _get_observation(self): - features_and_state = self.signal_features[( + features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] + features_and_state = DataFrame(np.zeros((len(features_window), 2)), + columns=['current_profit_pct', 'position'], + index=features_window.index) + features_and_state['current_profit_pct'] = self.get_unrealized_profit() features_and_state['position'] = self._position.value + features_and_state = pd.concat([features_window, features_and_state], axis=1) return features_and_state def get_unrealized_profit(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 395b2a1a6..9c7b1e4b4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -8,9 +8,10 @@ from pandas import DataFrame from abc import abstractmethod from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade import torch.multiprocessing +from stable_baselines3.common.monitor import Monitor import torch as th logger = logging.getLogger(__name__) @@ -26,6 +27,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): super().__init__(config=kwargs['config']) th.set_num_threads(self.freqai_info.get('data_kitchen_thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] + self.train_env: Base5ActionRLEnv = None def train( self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen @@ -65,15 +67,37 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) 
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - model = self.fit_rl(data_dictionary, pair, dk, prices_train, prices_test) + self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test) + + model = self.fit_rl(data_dictionary, dk) logger.info(f"--------------------done training {pair}--------------------") return model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this in their prediction model if they are custom a MyRLEnv. Othwerwise + leaving this will default to Base5ActEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params), ".") + else: + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.train_env.reset() + self.eval_env.reset() + @abstractmethod - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): """ Agent customizations and abstract Reinforcement Learning customizations go in here. Abstract method, so this function must be overridden by @@ -193,66 +217,39 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -class MyRLEnv(Base3ActionRLEnv): +class MyRLEnv(Base5ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env. Here the user + Adds 5 actions. + """ - def step(self, action): - self._done = False - self._current_tick += 1 + def calculate_reward(self, action): - if self._current_tick == self._end_tick: - self._done = True + if self._last_trade_tick is None: + return 0. 
- self.update_portfolio_log_returns(action) + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info + return 0. 
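One other change in this patch worth calling out: `_get_observation()` in Base5ActionRLEnv (earlier hunk) now appends the agent's own state, `current_profit_pct` and `position`, to every feature window, and the 3-action environment receives the matching `signal_features.shape[1] + 2` shape change in the following commit. A small sketch of that concatenation (pandas only; the helper name is illustrative):

    import pandas as pd

    def build_observation(signal_features: pd.DataFrame, current_tick: int,
                          window_size: int, unrealized_profit: float,
                          position_value: float) -> pd.DataFrame:
        # Trailing feature window, exactly as the environment slices it.
        window = signal_features.iloc[current_tick - window_size:current_tick]
        # Two state columns broadcast across the window rows.
        state = pd.DataFrame({'current_profit_pct': unrealized_profit,
                              'position': position_value}, index=window.index)
        # Resulting observation shape is (window_size, n_features + 2).
        return pd.concat([window, state], axis=1)
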
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 337e94607..5fa24a599 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -3,9 +3,7 @@ from typing import Any, Dict # , Tuple import numpy as np # import numpy.typing as npt -# import pandas as pd import torch as th -# from pandas import DataFrame from stable_baselines3.common.monitor import Monitor from typing import Callable from stable_baselines3 import PPO @@ -16,7 +14,6 @@ from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Posi from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.data_kitchen import FreqaiDataKitchen import gym -from pandas import DataFrame logger = logging.getLogger(__name__) @@ -48,26 +45,15 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - env_id = "train_env" - num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) - - eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i in - range(num_cpu)]) - path = dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -75,7 +61,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 512]) - model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, + model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/ppo/tensorboard/", **self.freqai_info['model_training_parameters'] ) @@ -87,10 +73,37 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') - eval_env.close() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this in their prediction model if they are custom a MyRLEnv. 
Othwerwise + leaving this will default to Base5ActEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + env_id = "train_env" + num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i + in range(num_cpu)]) + + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i + in range(num_cpu)]) + else: + self.train_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.train_env.env_method('reset') + self.eval_env.env_method('reset') + class MyRLEnv(Base3ActionRLEnv): """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 3a57142cf..3c4ac6bdb 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -9,8 +9,7 @@ from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer import numpy as np -from pandas import DataFrame - +import gc from freqtrade.freqai.data_kitchen import FreqaiDataKitchen logger = logging.getLogger(__name__) @@ -21,24 +20,15 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - # environments - train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) - eval_env = Monitor(eval, ".") - eval_env.reset() - path = dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -46,7 +36,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128]) - model = TDQN('TMultiInputPolicy', train_env, + model = TDQN('TMultiInputPolicy', self.train_env, tensorboard_log=f"{path}/tdqn/tensorboard/", policy_kwargs=policy_kwargs, replay_buffer_class=ReplayBuffer, @@ -58,12 +48,33 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): callback=eval_callback ) + del model best_model = DQN.load(dk.data_path / "best_model") print('Training finished!') - + gc.collect() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this as shown here if they are using a custom MyRLEnv + """ + train_df = 
data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params), ".") + else: + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.train_env.reset() + self.eval_env.reset() + # User can inherit and customize 5 action environment class MyRLEnv(Base5ActionRLEnv): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index bf9e03b7f..8634fd958 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -4,8 +4,8 @@ import torch as th import numpy as np import gym from typing import Callable -from stable_baselines3.common.callbacks import ( - EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold) +from stable_baselines3.common.callbacks import EvalCallback +# EvalCallback , StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed @@ -15,7 +15,6 @@ from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcement from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from pandas import DataFrame logger = logging.getLogger(__name__) @@ -47,46 +46,23 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - env_id = "train_env" - num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) - - eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i in - range(num_cpu)]) - path = dk.data_path - stop_train_callback = StopTrainingOnNoModelImprovement( - max_no_improvement_evals=5, - min_evals=10, - verbose=2 - ) - callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=2) - eval_callback = EvalCallback( - eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", - eval_freq=int(eval_freq), - deterministic=True, - render=True, - callback_after_eval=stop_train_callback, - callback_on_new_best=callback_on_best, - verbose=2 - ) + + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 512]) - model = TDQN('TMultiInputPolicy', train_env, + model = TDQN('TMultiInputPolicy', self.train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/tdqn/tensorboard/", replay_buffer_class=ReplayBuffer, @@ -100,12 +76,40 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): best_model = DQN.load(dk.data_path / "best_model.zip") print('Training finished!') - eval_env.close() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this in their prediction model if they are custom a MyRLEnv. Othwerwise + leaving this will default to Base5ActEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + env_id = "train_env" + num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i + in range(num_cpu)]) + + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i + in range(num_cpu)]) + else: + self.train_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.train_env.env_method('reset') + self.eval_env.env_method('reset') # User can inherit and customize 5 action environment + + class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. 
Here the user From 4baa36bdcf449e224eaa4c69001bc2c503253988 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Fri, 19 Aug 2022 01:49:11 +0700 Subject: [PATCH 042/421] fix persist a single training environment for PPO --- config_examples/config_freqai-rl.example.json | 8 +--- freqtrade/freqai/RL/Base3ActionRLEnv.py | 23 ++++++++-- .../ReinforcementLearningPPO.py | 45 ++++++++++++------- 3 files changed, 51 insertions(+), 25 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index ccc977705..1af872552 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -79,13 +79,9 @@ "random_state": 1, "shuffle": false }, - "model_training_parameters": { + "model_training_parameters": { "learning_rate": 0.00025, "gamma": 0.9, - "target_update_interval": 5000, - "buffer_size": 50000, - "exploration_initial_eps":1, - "exploration_final_eps": 0.1, "verbose": 1 }, "rl_config": { @@ -103,4 +99,4 @@ "internals": { "process_throttle_secs": 5 } -} \ No newline at end of file +} diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 9d17b982d..df53c729b 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -1,13 +1,16 @@ import logging from enum import Enum -# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import gym import numpy as np +import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame + +# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + logger = logging.getLogger(__name__) @@ -43,6 +46,9 @@ class Base3ActionRLEnv(gym.Env): self.id = id self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + + def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): self.df = df self.signal_features = self.df self.prices = prices @@ -54,7 +60,7 @@ class Base3ActionRLEnv(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1]) + self.shape = (window_size, self.signal_features.shape[1] + 2) self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) @@ -165,7 +171,16 @@ class Base3ActionRLEnv(gym.Env): return observation, step_reward, self._done, info def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + features_window = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state = DataFrame(np.zeros((len(features_window), 2)), + columns=['current_profit_pct', 'position'], + index=features_window.index) + + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state def get_unrealized_profit(self): @@ -307,7 +322,7 @@ class Base3ActionRLEnv(gym.Env): def prev_price(self) -> float: return self.prices.iloc[self._current_tick - 1].open - def sharpe_ratio(self): + def sharpe_ratio(self) -> float: if len(self.close_trade_profit) == 0: return 0. 
returns = np.array(self.close_trade_profit) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 5dc7735d3..993ac263b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -1,16 +1,17 @@ +import gc import logging from typing import Any, Dict # , Tuple import numpy as np # import numpy.typing as npt import torch as th -from pandas import DataFrame from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base3ActionRLEnv import Actions, Base3ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel logger = logging.getLogger(__name__) @@ -21,23 +22,15 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - # environments - train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) - eval_env = Monitor(eval, ".") - path = dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -45,8 +38,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128]) - model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, + model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", **self.freqai_info['model_training_parameters'] ) @@ -55,12 +48,34 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): callback=eval_callback ) + del model best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') + gc.collect() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this as shown here if they are using a custom MyRLEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + 
reward_kwargs=self.reward_params), ".") + else: + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.train_env.reset() + self.eval_env.reset() + class MyRLEnv(Base3ActionRLEnv): """ From 4b9499e321ba107f71db0953ed5718a31b4f8bc1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 19 Aug 2022 11:04:15 +0200 Subject: [PATCH 043/421] improve nomenclature and fix short exit bug --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 52 ++++++++++++------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index bf3f0df33..4c946a5b2 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -13,10 +13,10 @@ logger = logging.getLogger(__name__) class Actions(Enum): Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 + Long_enter = 1 + Long_exit = 2 + Short_enter = 3 + Short_exit = 4 class Positions(Enum): @@ -139,16 +139,16 @@ class Base5ActionRLEnv(gym.Env): if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long_buy.value: + elif action == Actions.Long_enter.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short_buy.value: + elif action == Actions.Short_enter.value: self._position = Positions.Short trade_type = "short" - elif action == Actions.Long_sell.value: + elif action == Actions.Long_exit.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Short_sell.value: + elif action == Actions.Short_exit.value: self._position = Positions.Neutral trade_type = "neutral" else: @@ -221,24 +221,24 @@ class Base5ActionRLEnv(gym.Env): return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Neutral) or - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Neutral)) + (action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Short_enter.value and self._position == Positions.Long) or + (action == Actions.Short_exit.value and self._position == Positions.Short) or + (action == Actions.Short_exit.value and self._position == Positions.Long) or + (action == Actions.Short_exit.value and self._position == Positions.Neutral) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or + (action == Actions.Long_enter.value and self._position == Positions.Short) or + (action == Actions.Long_exit.value and self._position == Positions.Long) or + (action == 
Actions.Long_exit.value and self._position == Positions.Short) or + (action == Actions.Long_exit.value and self._position == Positions.Neutral)) def _is_trade(self, action: Actions): - return ((action == Actions.Long_buy.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Neutral)) + return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or + (action == Actions.Short_enter.value and self._position == Positions.Neutral)) def is_hold(self, action): - return ((action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Long_buy.value and self._position == Positions.Long) or + return ((action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Neutral)) @@ -265,7 +265,7 @@ class Base5ActionRLEnv(gym.Env): return 0. # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: + if action == Actions.Long_exit.value and self._position == Positions.Long: if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: @@ -292,7 +292,7 @@ class Base5ActionRLEnv(gym.Env): # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: + if action == Actions.Short_exit.value and self._position == Positions.Short: if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: @@ -346,7 +346,7 @@ class Base5ActionRLEnv(gym.Env): # Long positions if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short_buy.value or action == Actions.Neutral.value: + if action == Actions.Short_enter.value or action == Actions.Neutral.value: current_price = self.add_sell_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open @@ -360,7 +360,7 @@ class Base5ActionRLEnv(gym.Env): # Short positions if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long_buy.value or action == Actions.Neutral.value: + if action == Actions.Long_enter.value or action == Actions.Neutral.value: current_price = self.add_buy_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open From 3eb897c2f8c89e07f81fbd8675b97a3f7bddab91 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 20 Aug 2022 16:35:29 +0200 Subject: [PATCH 044/421] reuse callback, allow user to acces all stable_baselines3 agents via config --- config_examples/config_freqai-rl.example.json | 9 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 69 +++----- .../RL/BaseReinforcementLearningModel.py | 66 +++++--- freqtrade/freqai/data_drawer.py | 11 +- .../prediction_models/ReinforcementLearner.py | 82 ++++++++++ .../ReinforcementLearnerCustomAgent.py} | 62 ++++++-- .../ReinforcementLearner_multiproc.py | 84 ++++++++++ .../ReinforcementLearningPPO.py | 104 ------------ .../ReinforcementLearningPPO_multiproc.py | 132 ---------------- .../ReinforcementLearningTDQN.py | 115 -------------- .../ReinforcementLearningTDQN_multiproc.py | 148 ------------------ 11 files 
changed, 295 insertions(+), 587 deletions(-) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearner.py rename freqtrade/freqai/{RL/TDQNagent.py => prediction_models/ReinforcementLearnerCustomAgent.py} (81%) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 1af872552..fa08cdd60 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -55,7 +55,7 @@ ], "freqai": { "enabled": true, - "model_save_type": "stable_baselines_dqn", + "model_save_type": "stable_baselines", "conv_width": 10, "purge_old_models": true, "train_period_days": 10, @@ -85,8 +85,11 @@ "verbose": 1 }, "rl_config": { - "train_cycles": 15, - "eval_cycles": 5, + "train_cycles": 10, + "eval_cycles": 3, + "thread_count": 4, + "model_type": "PPO", + "policy_type": "MlpPolicy", "model_reward_parameters": { "rr": 1, "profit_aim": 0.02 diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 4c946a5b2..7d3cbffbe 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -266,59 +266,28 @@ class Base5ActionRLEnv(gym.Env): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - if len(self.close_trade_profit): - # aim x2 rw - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee( - self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee( - self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - # less than aim x1 rw - elif self.close_trade_profit[-1] < self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee( - self.prices.iloc[self._last_trade_tick].open - ) - current_price = self.add_sell_fee( - self.prices.iloc[self._current_tick].open - ) - return float(np.log(current_price) - np.log(last_trade_price)) - # # less than RR SL x2 neg rw - # elif self.close_trade_profit[-1] < (self.profit_aim * -1): - # last_trade_price = self.add_buy_fee( - # self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_sell_fee( - # self.prices.iloc[self._current_tick].open) - # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_exit.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - if len(self.close_trade_profit): - # aim x2 rw - if self.close_trade_profit[-1] > 
self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee( - self.prices.iloc[self._last_trade_tick].open - ) - current_price = self.add_buy_fee( - self.prices.iloc[self._current_tick].open - ) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - # less than aim x1 rw - elif self.close_trade_profit[-1] < self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee( - self.prices.iloc[self._last_trade_tick].open - ) - current_price = self.add_buy_fee( - self.prices.iloc[self._current_tick].open - ) - return float(np.log(last_trade_price) - np.log(current_price)) - # # less than RR SL x2 neg rw - # elif self.close_trade_profit[-1] > self.profit_aim * self.rr: - # last_trade_price = self.add_sell_fee( - # self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_buy_fee( - # self.prices.iloc[self._current_tick].open) - # return float((np.log(last_trade_price) - np.log(current_price)) * 2) * -1 + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_exit.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + return 0. def _update_profit(self, action): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 9c7b1e4b4..9cada2bf0 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -11,8 +11,12 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade import torch.multiprocessing +from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor import torch as th +from typing import Callable +from stable_baselines3.common.utils import set_random_seed +import gym logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -25,9 +29,15 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - th.set_num_threads(self.freqai_info.get('data_kitchen_thread_count', 4)) + th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Base5ActionRLEnv = None + self.eval_env: Base5ActionRLEnv = None + self.eval_callback: EvalCallback = None + mod = __import__('stable_baselines3', fromlist=[ + self.freqai_info['rl_config']['model_type']]) + self.MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) + self.policy_type = self.freqai_info['rl_config']['policy_type'] def train( self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen @@ -67,7 +77,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test) + 
self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk) model = self.fit_rl(data_dictionary, dk) @@ -75,13 +85,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): """ - User overrides this in their prediction model if they are custom a MyRLEnv. Othwerwise - leaving this will default to Base5ActEnv + User overrides this as shown here if they are using a custom MyRLEnv """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) # environments if not self.train_env: @@ -90,11 +100,17 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params), ".") + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) else: - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) self.train_env.reset() self.eval_env.reset() + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(test_df, prices_test, self.CONV_WIDTH, self.reward_params) + self.eval_callback.__init__(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) @abstractmethod def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -206,16 +222,28 @@ class BaseReinforcementLearningModel(IFreqaiModel): # all the other existing fit() functions to include dk argument. For now we instantiate and # leave it. def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: - """ - Most regressors use the same function names and arguments e.g. user - can drop in LGBMRegressor in place of CatBoostRegressor and all data - management will be properly handled by Freqai. - :param data_dictionary: Dict = the dictionary constructed by DataHandler to hold - all the training and test data/labels. - """ - return +def make_env(env_id: str, rank: int, seed: int, train_df, price, + reward_params, window_size, monitor=False) -> Callable: + """ + Utility function for multiprocessed env. + + :param env_id: (str) the environment ID + :param num_env: (int) the number of environment you wish to have in subprocesses + :param seed: (int) the inital seed for RNG + :param rank: (int) index of the subprocess + :return: (Callable) + """ + def _init() -> gym.Env: + + env = MyRLEnv(df=train_df, prices=price, window_size=window_size, + reward_kwargs=reward_params, id=env_id, seed=seed + rank) + if monitor: + env = Monitor(env, ".") + return env + set_random_seed(seed) + return _init class MyRLEnv(Base5ActionRLEnv): """ @@ -229,24 +257,24 @@ class MyRLEnv(Base5ActionRLEnv): return 0. 
# close long - if action == Actions.Long_sell.value and self._position == Positions.Long: + if action == Actions.Long_exit.value and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float(np.log(current_price) - np.log(last_trade_price)) - if action == Actions.Long_sell.value and self._position == Positions.Long: + if action == Actions.Long_exit.value and self._position == Positions.Long: if self.close_trade_profit[-1] > self.profit_aim * self.rr: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float((np.log(current_price) - np.log(last_trade_price)) * 2) # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: + if action == Actions.Short_exit.value and self._position == Positions.Short: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) return float(np.log(last_trade_price) - np.log(current_price)) - if action == Actions.Short_buy.value and self._position == Positions.Short: + if action == Actions.Short_exit.value and self._position == Positions.Short: if self.close_trade_profit[-1] > self.profit_aim * self.rr: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 9603fb9ab..c37973551 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -471,12 +471,11 @@ class FreqaiDataDrawer: elif model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines_ppo': - from stable_baselines3.ppo.ppo import PPO - model = PPO.load(dk.data_path / f"{dk.model_filename}_model") - elif model_type == 'stable_baselines_dqn': - from stable_baselines3 import DQN - model = DQN.load(dk.data_path / f"{dk.model_filename}_model") + elif model_type == 'stable_baselines': + mod = __import__('stable_baselines3', fromlist=[ + self.freqai_info['rl_config']['model_type']]) + MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) + model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py new file mode 100644 index 000000000..2faa6eb3a --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -0,0 +1,82 @@ +import logging +from typing import Any, Dict # , Tuple + +# import numpy.typing as npt +import torch as th +import numpy as np +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from pathlib import Path + +logger = logging.getLogger(__name__) + + +class ReinforcementLearner(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.data_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Couldnt find best model, using final model instead.') + + return model + + +class MyRLEnv(Base5ActionRLEnv): + """ + User can modify any part of the environment by overriding base + functions + """ + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_exit.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_exit.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. 
diff --git a/freqtrade/freqai/RL/TDQNagent.py b/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py similarity index 81% rename from freqtrade/freqai/RL/TDQNagent.py rename to freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py index 584f6a8ef..bb16b612b 100644 --- a/freqtrade/freqai/RL/TDQNagent.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py @@ -1,17 +1,59 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import torch +import logging import torch as th +from typing import Any, Dict, List, Optional, Tuple, Type, Union +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3.common.policies import BasePolicy -from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, - FlattenExtractor) -from stable_baselines3.common.type_aliases import GymEnv, Schedule +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from pathlib import Path from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, QNetwork) from torch import nn +import gym +from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, + FlattenExtractor) +from stable_baselines3.common.type_aliases import GymEnv, Schedule +from stable_baselines3.common.policies import BasePolicy + +logger = logging.getLogger(__name__) + + +class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): + """ + User can customize agent by defining the class and using it directly. + Here the example is "TDQN" + """ + + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + # TDQN is a custom agent defined below + model = TDQN(self.policy_type, self.train_env, + tensorboard_log=Path(dk.data_path / "tensorboard"), + policy_kwargs=policy_kwargs, + **self.freqai_info['model_training_parameters'] + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Couldnt find best model, using final model instead.') + + return model + +# User creates their custom agent and networks as shown below def create_mlp_( @@ -72,7 +114,7 @@ class TDQNetwork(QNetwork): def init_weights(self, m): if type(m) == nn.Linear: - torch.nn.init.kaiming_uniform_(m.weight) + th.nn.init.kaiming_uniform_(m.weight) class TDQNPolicy(DQNPolicy): @@ -175,7 +217,7 @@ class TDQN(DQN): exploration_initial_eps: float = 1.0, exploration_final_eps: float = 0.05, max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, + tensorboard_log: Optional[Path] = None, create_eval_env: bool = False, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 1, diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py new file mode 100644 index 000000000..1854bb1a5 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -0,0 +1,84 @@ +import logging +from typing import Any, Dict # , Tuple + 
+# import numpy.typing as npt +import torch as th +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, + make_env) +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen + +from pathlib import Path + +logger = logging.getLogger(__name__) + + +class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[512, 512, 512]) + + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.data_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Couldnt find best model, using final model instead.') + + return model + + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): + """ + If user has particular environment configuration needs, they can do that by + overriding this function. In the present case, the user wants to setup training + environments for multiple workers. + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + + # environments + if not self.train_env: + env_id = "train_env" + num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i + in range(num_cpu)]) + + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i + in range(num_cpu)]) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) + else: + self.train_env.env_method('reset') + self.eval_env.env_method('reset') + self.train_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_callback.__init__(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py deleted file mode 100644 index 993ac263b..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ /dev/null @@ -1,104 +0,0 @@ -import gc -import logging -from typing import Any, Dict # , Tuple - -import numpy as np -# import numpy.typing as npt -import torch as th -from stable_baselines3 import PPO -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor - -from 
freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.Base3ActionRLEnv import Actions, Base3ActionRLEnv, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel - - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningPPO(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) - - model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - del model - best_model = PPO.load(dk.data_path / "best_model") - - print('Training finished!') - gc.collect() - - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this as shown here if they are using a custom MyRLEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params), ".") - else: - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.train_env.reset() - self.eval_env.reset() - - -class MyRLEnv(Base3ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py deleted file mode 100644 index 5fa24a599..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ /dev/null @@ -1,132 +0,0 @@ -import logging -from typing import Any, Dict # , Tuple - -import numpy as np -# import numpy.typing as npt -import torch as th -from stable_baselines3.common.monitor import Monitor -from typing import Callable -from stable_baselines3 import PPO -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.utils import set_random_seed -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import gym - -logger = logging.getLogger(__name__) - - -def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False) -> Callable: - """ - Utility function for multiprocessed env. - - :param env_id: (str) the environment ID - :param num_env: (int) the number of environment you wish to have in subprocesses - :param seed: (int) the inital seed for RNG - :param rank: (int) index of the subprocess - :return: (Callable) - """ - def _init() -> gym.Env: - - env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank) - if monitor: - env = Monitor(env, ".") - return env - set_random_seed(seed) - return _init - - -class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 512]) - - model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - best_model = PPO.load(dk.data_path / "best_model") - print('Training finished!') - - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this in their prediction model if they are custom a MyRLEnv. 
Othwerwise - leaving this will default to Base5ActEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - env_id = "train_env" - num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i - in range(num_cpu)]) - - eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i - in range(num_cpu)]) - else: - self.train_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.eval_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.train_env.env_method('reset') - self.eval_env.env_method('reset') - - -class MyRLEnv(Base3ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py deleted file mode 100644 index 3c4ac6bdb..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ /dev/null @@ -1,115 +0,0 @@ -import logging -from typing import Any, Dict # Optional -import torch as th -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor -from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from freqtrade.freqai.RL.TDQNagent import TDQN -from stable_baselines3 import DQN -from stable_baselines3.common.buffers import ReplayBuffer -import numpy as np -import gc -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningTDQN(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. 
- """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) - - model = TDQN('TMultiInputPolicy', self.train_env, - tensorboard_log=f"{path}/tdqn/tensorboard/", - policy_kwargs=policy_kwargs, - replay_buffer_class=ReplayBuffer, - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - del model - best_model = DQN.load(dk.data_path / "best_model") - - print('Training finished!') - gc.collect() - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this as shown here if they are using a custom MyRLEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params), ".") - else: - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.train_env.reset() - self.eval_env.reset() - - -# User can inherit and customize 5 action environment -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. 
- - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py deleted file mode 100644 index 8634fd958..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ /dev/null @@ -1,148 +0,0 @@ -import logging -from typing import Any, Dict # Optional -import torch as th -import numpy as np -import gym -from typing import Callable -from stable_baselines3.common.callbacks import EvalCallback -# EvalCallback , StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold -from stable_baselines3.common.monitor import Monitor -from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.utils import set_random_seed -from stable_baselines3 import DQN -from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from freqtrade.freqai.RL.TDQNagent import TDQN -from stable_baselines3.common.buffers import ReplayBuffer -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen - -logger = logging.getLogger(__name__) - - -def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False) -> Callable: - """ - Utility function for multiprocessed env. - - :param env_id: (str) the environment ID - :param num_env: (int) the number of environment you wish to have in subprocesses - :param seed: (int) the inital seed for RNG - :param rank: (int) index of the subprocess - :return: (Callable) - """ - def _init() -> gym.Env: - - env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank) - if monitor: - env = Monitor(env, ".") - return env - set_random_seed(seed) - return _init - - -class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. 
- """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 512]) - - model = TDQN('TMultiInputPolicy', self.train_env, - policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/tdqn/tensorboard/", - replay_buffer_class=ReplayBuffer, - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - best_model = DQN.load(dk.data_path / "best_model.zip") - print('Training finished!') - - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this in their prediction model if they are custom a MyRLEnv. Othwerwise - leaving this will default to Base5ActEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - env_id = "train_env" - num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i - in range(num_cpu)]) - - eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i - in range(num_cpu)]) - else: - self.train_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.eval_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.train_env.env_method('reset') - self.eval_env.env_method('reset') - -# User can inherit and customize 5 action environment - - -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. 
- - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. From 8cd4daad0ad1e511f95c7881d18713a54897567a Mon Sep 17 00:00:00 2001 From: mrzdev <106373816+mrzdev@users.noreply.github.com> Date: Sun, 21 Aug 2022 17:43:40 +0200 Subject: [PATCH 045/421] Feat/freqai rl dev (#7) * access trades through get_trades_proxy method to allow backtesting --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 9cada2bf0..a0d5425d3 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -123,7 +123,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return def get_state_info(self, pair): - open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) + open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 current_profit = 0 for trade in open_trades: @@ -137,8 +137,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = current_value / openrate - 1 total_profit = 0 - closed_trades = Trade.get_trades( - trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) + closed_trades = Trade.get_trades_proxy(pair = pair, is_open=False) for trade in closed_trades: total_profit += trade.close_profit From 8b3a8234ac96d91b9544005df8d7b5983134ea1a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 21 Aug 2022 19:43:39 +0200 Subject: [PATCH 046/421] fix env bug, allow example strat to short --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 56 ++++++++----------- .../ReinforcementLearningExample5ac.py | 2 +- 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 7d3cbffbe..b2aeef73b 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -200,12 +200,12 @@ class Base5ActionRLEnv(gym.Env): if self._position == Positions.Neutral: return 0. 
elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) return (last_trade_price - current_price) / last_trade_price elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) return (current_price - last_trade_price) / last_trade_price else: return 0. @@ -223,12 +223,12 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_enter.value and self._position == Positions.Short) or (action == Actions.Short_enter.value and self._position == Positions.Long) or - (action == Actions.Short_exit.value and self._position == Positions.Short) or + # (action == Actions.Short_exit.value and self._position == Positions.Short) or (action == Actions.Short_exit.value and self._position == Positions.Long) or (action == Actions.Short_exit.value and self._position == Positions.Neutral) or (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short) or - (action == Actions.Long_exit.value and self._position == Positions.Long) or + # (action == Actions.Long_exit.value and self._position == Positions.Long) or (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) @@ -243,10 +243,10 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Neutral)) - def add_buy_fee(self, price): + def add_entry_fee(self, price): return price * (1 + self.fee) - def add_sell_fee(self, price): + def add_exit_fee(self, price): return price / (1 + self.fee) def _update_history(self, info): @@ -266,27 +266,21 @@ class Base5ActionRLEnv(gym.Env): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == 
Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_exit.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. @@ -315,27 +309,21 @@ class Base5ActionRLEnv(gym.Env): # Long positions if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short_enter.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - previous_price = self.prices.iloc[self._current_tick - 1].open if (self._position_history[self._current_tick - 1] == Positions.Short or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) + previous_price = self.add_entry_fee(previous_price) return np.log(current_price) - np.log(previous_price) # Short positions if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long_enter.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - previous_price = self.prices.iloc[self._current_tick - 1].open if (self._position_history[self._current_tick - 1] == Positions.Long or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) + previous_price = self.add_exit_fee(previous_price) return np.log(previous_price) - np.log(current_price) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 70727f6db..437b53b05 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -36,7 +36,7 @@ class ReinforcementLearningExample5ac(IStrategy): stoploss = -0.05 use_exit_signal = True startup_candle_count: int = 300 - can_short = False + can_short = True linear_roi_offset = DecimalParameter( 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True From d88a0dbf82bd180e66b53cca2bc0781179de42a9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 21 Aug 2022 19:58:36 +0200 Subject: [PATCH 047/421] add sb3_contrib models to the available agents. include sb3_contrib in requirements. 
--- freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 - .../RL/BaseReinforcementLearningModel.py | 54 +++++++++++-------- requirements-freqai.txt | 4 +- 3 files changed, 35 insertions(+), 25 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index b2aeef73b..94de259a9 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -223,12 +223,10 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_enter.value and self._position == Positions.Short) or (action == Actions.Short_enter.value and self._position == Positions.Long) or - # (action == Actions.Short_exit.value and self._position == Positions.Short) or (action == Actions.Short_exit.value and self._position == Positions.Long) or (action == Actions.Short_exit.value and self._position == Positions.Neutral) or (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short) or - # (action == Actions.Long_exit.value and self._position == Positions.Long) or (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a0d5425d3..bb858f3cf 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -6,6 +6,7 @@ import numpy.typing as npt import pandas as pd from pandas import DataFrame from abc import abstractmethod +from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions @@ -21,6 +22,9 @@ logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') +SB3_MODELS = ['PPO', 'A2C', 'DQN', 'TD3', 'SAC'] +SB3_CONTRIB_MODELS = ['TRPO', 'ARS'] + class BaseReinforcementLearningModel(IFreqaiModel): """ @@ -34,9 +38,19 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.train_env: Base5ActionRLEnv = None self.eval_env: Base5ActionRLEnv = None self.eval_callback: EvalCallback = None - mod = __import__('stable_baselines3', fromlist=[ - self.freqai_info['rl_config']['model_type']]) - self.MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) + self.model_type = self.freqai_info['rl_config']['model_type'] + if self.model_type in SB3_MODELS: + import_str = 'stable_baselines3' + elif self.model_type in SB3_CONTRIB_MODELS: + import_str = 'sb3_contrib' + else: + raise OperationalException(f'{self.model_type} not available in stable_baselines3 or ' + f'sb3_contrib. 
please choose one of {SB3_MODELS} or ' + f'{SB3_CONTRIB_MODELS}') + + mod = __import__(import_str, fromlist=[ + self.model_type]) + self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] def train( @@ -137,7 +151,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = current_value / openrate - 1 total_profit = 0 - closed_trades = Trade.get_trades_proxy(pair = pair, is_open=False) + closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) for trade in closed_trades: total_profit += trade.close_profit @@ -223,6 +237,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: return + def make_env(env_id: str, rank: int, seed: int, train_df, price, reward_params, window_size, monitor=False) -> Callable: """ @@ -244,6 +259,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, set_random_seed(seed) return _init + class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. Here the user @@ -257,26 +273,20 @@ class MyRLEnv(Base5ActionRLEnv): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_exit.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
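The model_type dispatch added above reduces to resolving an agent class by name from either stable_baselines3 or sb3_contrib. A standalone sketch of the same lookup (the helper name and the 'TRPO' example are illustrative):

    from freqtrade.exceptions import OperationalException

    SB3_MODELS = ['PPO', 'A2C', 'DQN', 'TD3', 'SAC']
    SB3_CONTRIB_MODELS = ['TRPO', 'ARS']

    def resolve_model_class(model_type: str):
        # pick the package that ships the requested agent, mirroring the check above
        if model_type in SB3_MODELS:
            import_str = 'stable_baselines3'
        elif model_type in SB3_CONTRIB_MODELS:
            import_str = 'sb3_contrib'
        else:
            raise OperationalException(
                f'{model_type} not available in stable_baselines3 or sb3_contrib.')
        mod = __import__(import_str, fromlist=[model_type])
        return getattr(mod, model_type)

    MODELCLASS = resolve_model_class('TRPO')   # -> sb3_contrib.TRPO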
diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 6000f8e0f..de1b6670a 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -9,4 +9,6 @@ lightgbm==3.3.2 torch==1.12.1 stable-baselines3==1.6.0 gym==0.21.0 -tensorboard==2.9.1 \ No newline at end of file +tensorboard==2.9.1 +optuna==2.10.1 +sb3-contrib==1.6.0 \ No newline at end of file From 29f0e01c4a50e7b955a100ef49b47049eff3737a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 21 Aug 2022 20:33:09 +0200 Subject: [PATCH 048/421] expose environment reward parameters to the user config --- config_examples/config_freqai-rl.example.json | 3 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 7 +++-- .../RL/BaseReinforcementLearningModel.py | 16 +++++----- .../prediction_models/ReinforcementLearner.py | 30 ++++++++----------- .../ReinforcementLearner_multiproc.py | 4 +-- 5 files changed, 28 insertions(+), 32 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index fa08cdd60..07ddb04d3 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -92,7 +92,8 @@ "policy_type": "MlpPolicy", "model_reward_parameters": { "rr": 1, - "profit_aim": 0.02 + "profit_aim": 0.02, + "win_reward_factor": 2 } } }, diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 94de259a9..84a82c5de 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -42,9 +42,10 @@ class Base5ActionRLEnv(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1): + id: str = 'baseenv-1', seed: int = 1, config: dict = {}): assert df.ndim == 2 + self.rl_config = config['freqai']['rl_config'] self.id = id self.seed(seed) self.reset_env(df, prices, window_size, reward_kwargs, starting_point) @@ -268,7 +269,7 @@ class Base5ActionRLEnv(gym.Env): current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short @@ -277,7 +278,7 @@ class Base5ActionRLEnv(gym.Env): current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
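With the reward parameters now read from rl_config, the close-trade reward is only scaled by win_reward_factor when the last trade beat profit_aim * rr. A worked sketch using the values from the example config above (rr=1, profit_aim=0.02, win_reward_factor=2); the entry/exit prices are made up and fees are ignored:

    import numpy as np

    rr, profit_aim, win_reward_factor = 1, 0.02, 2        # from the example config
    last_trade_price, current_price = 100.0, 103.0        # illustrative long entry/exit

    close_trade_profit = (current_price - last_trade_price) / last_trade_price   # 0.03
    factor = win_reward_factor if close_trade_profit > profit_aim * rr else 1    # 0.03 > 0.02 -> 2
    reward = float((np.log(current_price) - np.log(last_trade_price)) * factor)  # ~0.0591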
diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index bb858f3cf..0618a91ed 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -110,10 +110,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): # environments if not self.train_env: self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) + reward_kwargs=self.reward_params, config=self.config) self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params), ".") + reward_kwargs=self.reward_params, config=self.config), ".") self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, best_model_save_path=dk.data_path) @@ -239,7 +239,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False) -> Callable: + reward_params, window_size, monitor=False, config={}) -> Callable: """ Utility function for multiprocessed env. @@ -252,7 +252,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, def _init() -> gym.Env: env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank) + reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config) if monitor: env = Monitor(env, ".") return env @@ -277,16 +277,16 @@ class MyRLEnv(Base5ActionRLEnv): current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
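Because the train and eval environments are now constructed with the full freqtrade config, the reward code can reach anything under config['freqai']['rl_config']. A minimal sketch of the nesting the environment expects (keys mirror the example config; the values here are only illustrative):

    config = {
        "freqai": {
            "rl_config": {
                "train_cycles": 10,
                "eval_cycles": 3,
                "model_type": "PPO",
                "policy_type": "MlpPolicy",
                "model_reward_parameters": {"rr": 1, "profit_aim": 0.02, "win_reward_factor": 2},
            }
        }
    }

    rl_config = config["freqai"]["rl_config"]             # what Base5ActionRLEnv.__init__ stores
    win_factor = rl_config["model_reward_parameters"].get("win_reward_factor", 2)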
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2faa6eb3a..5f22971e1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -57,26 +57,20 @@ class MyRLEnv(Base5ActionRLEnv): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_exit.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 1854bb1a5..ee9a407c9 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -62,12 +62,12 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i + self.reward_params, self.CONV_WIDTH, config=self.config) for i in range(num_cpu)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i + self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, From a2a4bc05dbcd56b94bf87393e78864dde2d5d916 Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Mon, 22 Aug 2022 18:06:33 +0200 Subject: [PATCH 049/421] Fix the state profit calculation logic --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 0618a91ed..a9f406c9d 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -142,13 +142,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = 0 for trade in open_trades: if trade.pair == pair: - current_value = trade.open_trade_value + current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) openrate = trade.open_rate if 'long' in trade.enter_tag: market_side = 1 + current_profit = (current_value - openrate) / openrate else: market_side = 0 - current_profit = current_value / openrate - 1 + current_profit = (openrate - current_value ) / openrate total_profit = 0 closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) From f9a49744e6a9c4db7e74d9437c8da4e527adaddd Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 22 Aug 2022 19:15:56 +0200 Subject: [PATCH 050/421] add strategy to the freqai object --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- freqtrade/freqai/freqai_interface.py | 3 ++- freqtrade/strategy/interface.py | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a9f406c9d..360cbf9d4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -142,7 +142,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = 0 for trade in open_trades: if trade.pair == pair: - current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) + current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) #, side="buy", is_short=True) openrate = trade.open_rate if 'long' in trade.enter_tag: market_side = 1 diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index b6fde9357..21b79e003 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -7,7 +7,7 @@ import 
time from abc import ABC, abstractmethod from pathlib import Path from threading import Lock -from typing import Any, Dict, Tuple +from typing import Any, Dict, Tuple, Optional import numpy as np import pandas as pd @@ -90,6 +90,7 @@ class IFreqaiModel(ABC): self.begin_time: float = 0 self.begin_time_train: float = 0 self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe']) + self.strategy: Optional[IStrategy] = None def assert_config(self, config: Dict[str, Any]) -> None: diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 79dbd4c69..fe301eb30 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -152,6 +152,7 @@ class IStrategy(ABC, HyperStrategyMixin): self.freqai = FreqaiModelResolver.load_freqaimodel(self.config) self.freqai_info = self.config["freqai"] + self.freqai.strategy = self else: # Gracious failures if freqAI is disabled but "start" is called. class DummyClass(): From 280a1dc3f87f451cf2d8367d910ce1cf01f95d3d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 23 Aug 2022 09:44:44 +0200 Subject: [PATCH 051/421] add live rate, add trade duration --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 13 ++++++++--- .../RL/BaseReinforcementLearningModel.py | 22 ++++++++++++------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 84a82c5de..2b1c4f975 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -62,7 +62,7 @@ class Base5ActionRLEnv(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 2) + self.shape = (window_size, self.signal_features.shape[1] + 3) self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) @@ -184,15 +184,22 @@ class Base5ActionRLEnv(gym.Env): def _get_observation(self): features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 2)), - columns=['current_profit_pct', 'position'], + features_and_state = DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', 'position', 'trade_duration'], index=features_window.index) features_and_state['current_profit_pct'] = self.get_unrealized_profit() features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() features_and_state = pd.concat([features_window, features_and_state], axis=1) return features_and_state + def get_trade_duration(self): + if self._last_trade_tick is None: + return 0 + else: + return self._current_tick - self._last_trade_tick + def get_unrealized_profit(self): if self._last_trade_tick is None: diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 360cbf9d4..6a15b96f9 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -16,6 +16,7 @@ from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor import torch as th from typing import Callable +from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym logger = logging.getLogger(__name__) @@ -140,23 +141,27 @@ class BaseReinforcementLearningModel(IFreqaiModel): 
open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 current_profit = 0 + trade_duration = 0 for trade in open_trades: if trade.pair == pair: - current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) #, side="buy", is_short=True) + current_value = self.strategy.dp._exchange.get_rate( + pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate + now = datetime.now(timezone.utc).timestamp() + trade_duration = (now - trade.open_date.timestamp()) / self.base_tf_seconds if 'long' in trade.enter_tag: market_side = 1 current_profit = (current_value - openrate) / openrate else: market_side = 0 - current_profit = (openrate - current_value ) / openrate + current_profit = (openrate - current_value) / openrate - total_profit = 0 - closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) - for trade in closed_trades: - total_profit += trade.close_profit + # total_profit = 0 + # closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) + # for trade in closed_trades: + # total_profit += trade.close_profit - return market_side, current_profit, total_profit + return market_side, current_profit, int(trade_duration) def predict( self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False @@ -192,10 +197,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): - market_side, current_profit, total_profit = self.get_state_info(dk.pair) + market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] observations['current_profit'] = current_profit observations['position'] = market_side + observations['trade_duration'] = trade_duration res, _ = model.predict(observations, deterministic=True) return res From b26ed7dea4564d55b112cc50ce96e08983913bf2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 23 Aug 2022 14:58:38 +0200 Subject: [PATCH 052/421] fix generic reward, add time duration to reward --- config_examples/config_freqai-rl.example.json | 1 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 27 ++++------------- .../RL/BaseReinforcementLearningModel.py | 25 +++++++++------- .../prediction_models/ReinforcementLearner.py | 29 +++++++++++-------- .../ReinforcementLearner_multiproc.py | 6 ++-- 5 files changed, 43 insertions(+), 45 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 07ddb04d3..bb67b44b6 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -88,6 +88,7 @@ "train_cycles": 10, "eval_cycles": 3, "thread_count": 4, + "max_trade_duration_candles": 100, "model_type": "PPO", "policy_type": "MlpPolicy", "model_reward_parameters": { diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 2b1c4f975..a14111495 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -8,6 +8,7 @@ from gym import spaces from gym.utils import seeding from pandas import DataFrame import pandas as pd +from abc import abstractmethod logger = logging.getLogger(__name__) @@ -265,28 +266,12 @@ class Base5ActionRLEnv(gym.Env): def get_sharpe_ratio(self): return mean_over_std(self.get_portfolio_log_returns()) + @abstractmethod def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. 
- - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float((np.log(current_price) - np.log(last_trade_price)) * factor) - - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(np.log(last_trade_price) - np.log(current_price) * factor) + """ + Reward is created by BaseReinforcementLearningModel and can + be inherited/edited by the user made ReinforcementLearner file. + """ return 0. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 6a15b96f9..a9a1377a8 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -270,7 +270,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. + sets a custom reward based on profit and trade duration. """ def calculate_reward(self, action): @@ -278,22 +278,27 @@ class MyRLEnv(Base5ActionRLEnv): if self._last_trade_tick is None: return 0. + pnl = self.get_unrealized_profit() + max_trade_duration = self.rl_config['max_trade_duration_candles'] + trade_duration = self._current_tick - self._last_trade_tick + + factor = 1 + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float((np.log(current_price) - np.log(last_trade_price)) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(np.log(last_trade_price) - np.log(current_price) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) return 0. 
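For illustration, a rough standalone sketch of how the duration factor and win_reward_factor in the default reward above combine on a closed trade. The helper name and the example numbers are assumptions (not taken from the diff), and the profit-aim check is simplified to compare pnl directly rather than close_trade_profit[-1]:

def example_reward(pnl, trade_duration, max_trade_duration=100,
                   profit_aim=0.02, rr=1.0, win_reward_factor=2.0):
    # hypothetical helper mirroring only the exit branches of MyRLEnv.calculate_reward
    factor = 1.5 if trade_duration <= max_trade_duration else 0.5
    if pnl > profit_aim * rr:          # simplified stand-in for the close_trade_profit[-1] check
        factor *= win_reward_factor
    return pnl * factor

print(example_reward(0.03, 50))    # quick winning exit:  0.03 * 1.5 * 2 =  0.09
print(example_reward(0.03, 250))   # slow winning exit:   0.03 * 0.5 * 2 =  0.03
print(example_reward(-0.01, 50))   # quick losing exit:  -0.01 * 1.5     = -0.015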
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 5f22971e1..d3e6bde7c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -3,7 +3,6 @@ from typing import Any, Dict # , Tuple # import numpy.typing as npt import torch as th -import numpy as np from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel @@ -47,30 +46,36 @@ class ReinforcementLearner(BaseReinforcementLearningModel): class MyRLEnv(Base5ActionRLEnv): """ - User can modify any part of the environment by overriding base - functions + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. """ + def calculate_reward(self, action): if self._last_trade_tick is None: return 0. + pnl = self.get_unrealized_profit() + max_trade_duration = self.rl_config['max_trade_duration_candles'] + trade_duration = self._current_tick - self._last_trade_tick + + factor = 1 + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float((np.log(current_price) - np.log(last_trade_price)) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(np.log(last_trade_price) - np.log(current_price) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) return 0. 
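As a usage note, the MyRLEnv shipped with ReinforcementLearner.py can be replaced wholesale by the user. A minimal sketch of such an override (the class name and reward logic are assumptions, not part of this patch) that pays out only the realized profit on exits:

from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyMinimalEnv(Base5ActionRLEnv):
    """Hypothetical user environment: reward only the pnl captured when a trade is closed."""

    def calculate_reward(self, action):
        if self._last_trade_tick is None:
            return 0.
        if (action == Actions.Long_exit.value and self._position == Positions.Long) or \
           (action == Actions.Short_exit.value and self._position == Positions.Short):
            return float(self.get_unrealized_profit())
        return 0.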
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index ee9a407c9..96d42ae66 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -62,12 +62,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, config=self.config) for i + self.reward_params, self.CONV_WIDTH, + config=self.config) for i in range(num_cpu)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i + self.reward_params, self.CONV_WIDTH, monitor=True, + config=self.config) for i in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, From b708134c1a1e6429216f2605f625e064d51da235 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 23 Aug 2022 15:06:57 +0200 Subject: [PATCH 053/421] switch multiproc thread count to rl_config definition --- .../freqai/prediction_models/ReinforcementLearner_multiproc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 96d42ae66..17281e2d0 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -60,7 +60,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # environments if not self.train_env: env_id = "train_env" - num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, config=self.config) for i From c0cee5df07ac18d7f870385586e9007ccc74024b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 12:54:02 +0200 Subject: [PATCH 054/421] add continual retraining feature, handly mypy typing reqs, improve docstrings --- config_examples/config_freqai-rl.example.json | 3 +- freqtrade/freqai/RL/Base3ActionRLEnv.py | 618 +++++++++--------- freqtrade/freqai/RL/Base5ActionRLEnv.py | 38 +- .../RL/BaseReinforcementLearningModel.py | 42 +- .../ReinforcementLearnerCustomAgent.py | 10 +- freqtrade/freqai/data_drawer.py | 4 + .../ReinforcementLearningExample5ac.py | 3 +- .../prediction_models/BaseClassifierModel.py | 4 +- .../prediction_models/BaseRegressionModel.py | 4 +- .../prediction_models/BaseTensorFlowModel.py | 4 +- .../prediction_models/ReinforcementLearner.py | 19 +- 11 files changed, 387 insertions(+), 362 deletions(-) rename freqtrade/freqai/{prediction_models => RL}/ReinforcementLearnerCustomAgent.py (95%) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index bb67b44b6..b3f8737be 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -85,12 +85,13 @@ "verbose": 1 }, "rl_config": { - "train_cycles": 10, + "train_cycles": 3, "eval_cycles": 3, "thread_count": 4, "max_trade_duration_candles": 
100, "model_type": "PPO", "policy_type": "MlpPolicy", + "continual_retraining": true, "model_reward_parameters": { "rr": 1, "profit_aim": 0.02, diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index df53c729b..cddd2f6f9 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -1,330 +1,330 @@ -import logging -from enum import Enum +# import logging +# from enum import Enum -import gym -import numpy as np -import pandas as pd -from gym import spaces -from gym.utils import seeding -from pandas import DataFrame +# import gym +# import numpy as np +# import pandas as pd +# from gym import spaces +# from gym.utils import seeding +# from pandas import DataFrame -# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +# # from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union -logger = logging.getLogger(__name__) +# logger = logging.getLogger(__name__) -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 +# class Actions(Enum): +# Short = 0 +# Long = 1 +# Neutral = 2 -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 +# class Positions(Enum): +# Short = 0 +# Long = 1 +# Neutral = 0.5 - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long +# def opposite(self): +# return Positions.Short if self == Positions.Long else Positions.Long -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 +# def mean_over_std(x): +# std = np.std(x, ddof=1) +# mean = np.mean(x) +# return mean / std if std > 0 else 0 -class Base3ActionRLEnv(gym.Env): +# class Base3ActionRLEnv(gym.Env): - metadata = {'render.modes': ['human']} +# metadata = {'render.modes': ['human']} - def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), - reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1): - assert df.ndim == 2 +# def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), +# reward_kwargs: dict = {}, window_size=10, starting_point=True, +# id: str = 'baseenv-1', seed: int = 1): +# assert df.ndim == 2 - self.id = id - self.seed(seed) - self.reset_env(df, prices, window_size, reward_kwargs, starting_point) +# self.id = id +# self.seed(seed) +# self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] +# def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): +# self.df = df +# self.signal_features = self.df +# self.prices = prices +# self.window_size = window_size +# self.starting_point = starting_point +# self.rr = reward_kwargs["rr"] +# self.profit_aim = reward_kwargs["profit_aim"] - self.fee = 0.0015 +# self.fee = 0.0015 - # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 2) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - 
self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - def seed(self, seed: int = 1): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - - return self._get_observation() - - def step(self, action: int): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self.calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - def _get_observation(self): - features_window = self.signal_features[( - self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 2)), - columns=['current_profit_pct', 'position'], - index=features_window.index) - - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state - - def get_unrealized_profit(self): - - if self._last_trade_tick is None: - return 0. - - if self._position == Positions.Neutral: - return 0. 
- elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. - - def is_tradesignal(self, action: int): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit * pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit * pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action: int): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. 
- The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open +# # # spaces +# self.shape = (window_size, self.signal_features.shape[1] + 2) +# self.action_space = spaces.Discrete(len(Actions)) +# self.observation_space = spaces.Box( +# low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + +# # episode +# self._start_tick = self.window_size +# self._end_tick = len(self.prices) - 1 +# self._done = None +# self._current_tick = None +# self._last_trade_tick = None +# self._position = Positions.Neutral +# self._position_history = None +# self.total_reward = None +# self._total_profit = None +# self._first_rendering = None +# self.history = None +# self.trade_history = [] + +# def seed(self, seed: int = 1): +# self.np_random, seed = seeding.np_random(seed) +# return [seed] + +# def reset(self): + +# self._done = False + +# if self.starting_point is True: +# self._position_history = (self._start_tick * [None]) + [self._position] +# else: +# self._position_history = (self.window_size * [None]) + [self._position] + +# self._current_tick = self._start_tick +# self._last_trade_tick = None +# self._position = Positions.Neutral + +# self.total_reward = 0. +# self._total_profit = 1. 
# unit +# self._first_rendering = True +# self.history = {} +# self.trade_history = [] +# self.portfolio_log_returns = np.zeros(len(self.prices)) + +# self._profits = [(self._start_tick, 1)] +# self.close_trade_profit = [] + +# return self._get_observation() + +# def step(self, action: int): +# self._done = False +# self._current_tick += 1 + +# if self._current_tick == self._end_tick: +# self._done = True + +# self.update_portfolio_log_returns(action) + +# self._update_profit(action) +# step_reward = self.calculate_reward(action) +# self.total_reward += step_reward + +# trade_type = None +# if self.is_tradesignal(action): # exclude 3 case not trade +# # Update position +# """ +# Action: Neutral, position: Long -> Close Long +# Action: Neutral, position: Short -> Close Short + +# Action: Long, position: Neutral -> Open Long +# Action: Long, position: Short -> Close Short and Open Long + +# Action: Short, position: Neutral -> Open Short +# Action: Short, position: Long -> Close Long and Open Short +# """ + +# if action == Actions.Neutral.value: +# self._position = Positions.Neutral +# trade_type = "neutral" +# elif action == Actions.Long.value: +# self._position = Positions.Long +# trade_type = "long" +# elif action == Actions.Short.value: +# self._position = Positions.Short +# trade_type = "short" +# else: +# print("case not defined") + +# # Update last trade tick +# self._last_trade_tick = self._current_tick + +# if trade_type is not None: +# self.trade_history.append( +# {'price': self.current_price(), 'index': self._current_tick, +# 'type': trade_type}) + +# if self._total_profit < 0.2: +# self._done = True + +# self._position_history.append(self._position) +# observation = self._get_observation() +# info = dict( +# tick=self._current_tick, +# total_reward=self.total_reward, +# total_profit=self._total_profit, +# position=self._position.value +# ) +# self._update_history(info) + +# return observation, step_reward, self._done, info + +# def _get_observation(self): +# features_window = self.signal_features[( +# self._current_tick - self.window_size):self._current_tick] +# features_and_state = DataFrame(np.zeros((len(features_window), 2)), +# columns=['current_profit_pct', 'position'], +# index=features_window.index) + +# features_and_state['current_profit_pct'] = self.get_unrealized_profit() +# features_and_state['position'] = self._position.value +# features_and_state = pd.concat([features_window, features_and_state], axis=1) +# return features_and_state + +# def get_unrealized_profit(self): + +# if self._last_trade_tick is None: +# return 0. + +# if self._position == Positions.Neutral: +# return 0. +# elif self._position == Positions.Short: +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# return (last_trade_price - current_price) / last_trade_price +# elif self._position == Positions.Long: +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# return (current_price - last_trade_price) / last_trade_price +# else: +# return 0. 
+ +# def is_tradesignal(self, action: int): +# # trade signal +# """ +# not trade signal is : +# Action: Neutral, position: Neutral -> Nothing +# Action: Long, position: Long -> Hold Long +# Action: Short, position: Short -> Hold Short +# """ +# return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) +# or (action == Actions.Short.value and self._position == Positions.Short) +# or (action == Actions.Long.value and self._position == Positions.Long)) + +# def _is_trade(self, action: Actions): +# return ((action == Actions.Long.value and self._position == Positions.Short) or +# (action == Actions.Short.value and self._position == Positions.Long) or +# (action == Actions.Neutral.value and self._position == Positions.Long) or +# (action == Actions.Neutral.value and self._position == Positions.Short) +# ) + +# def is_hold(self, action): +# return ((action == Actions.Short.value and self._position == Positions.Short) +# or (action == Actions.Long.value and self._position == Positions.Long)) + +# def add_buy_fee(self, price): +# return price * (1 + self.fee) + +# def add_sell_fee(self, price): +# return price / (1 + self.fee) + +# def _update_history(self, info): +# if not self.history: +# self.history = {key: [] for key in info.keys()} + +# for key, value in info.items(): +# self.history[key].append(value) + +# def get_sharpe_ratio(self): +# return mean_over_std(self.get_portfolio_log_returns()) + +# def calculate_reward(self, action): + +# if self._last_trade_tick is None: +# return 0. + +# # close long +# if (action == Actions.Short.value or +# action == Actions.Neutral.value) and self._position == Positions.Long: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(current_price) - np.log(last_trade_price)) + +# # close short +# if (action == Actions.Long.value or +# action == Actions.Neutral.value) and self._position == Positions.Short: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(last_trade_price) - np.log(current_price)) + +# return 0. + +# def _update_profit(self, action): +# if self._is_trade(action) or self._done: +# pnl = self.get_unrealized_profit() + +# if self._position == Positions.Long: +# self._total_profit = self._total_profit + self._total_profit * pnl +# self._profits.append((self._current_tick, self._total_profit)) +# self.close_trade_profit.append(pnl) + +# if self._position == Positions.Short: +# self._total_profit = self._total_profit + self._total_profit * pnl +# self._profits.append((self._current_tick, self._total_profit)) +# self.close_trade_profit.append(pnl) + +# def most_recent_return(self, action: int): +# """ +# We support Long, Neutral and Short positions. +# Return is generated from rising prices in Long +# and falling prices in Short positions. +# The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+# """ +# # Long positions +# if self._position == Positions.Long: +# current_price = self.prices.iloc[self._current_tick].open +# if action == Actions.Short.value or action == Actions.Neutral.value: +# current_price = self.add_sell_fee(current_price) + +# previous_price = self.prices.iloc[self._current_tick - 1].open + +# if (self._position_history[self._current_tick - 1] == Positions.Short +# or self._position_history[self._current_tick - 1] == Positions.Neutral): +# previous_price = self.add_buy_fee(previous_price) + +# return np.log(current_price) - np.log(previous_price) + +# # Short positions +# if self._position == Positions.Short: +# current_price = self.prices.iloc[self._current_tick].open +# if action == Actions.Long.value or action == Actions.Neutral.value: +# current_price = self.add_buy_fee(current_price) + +# previous_price = self.prices.iloc[self._current_tick - 1].open +# if (self._position_history[self._current_tick - 1] == Positions.Long +# or self._position_history[self._current_tick - 1] == Positions.Neutral): +# previous_price = self.add_sell_fee(previous_price) + +# return np.log(previous_price) - np.log(current_price) + +# return 0 + +# def get_portfolio_log_returns(self): +# return self.portfolio_log_returns[1:self._current_tick + 1] + +# def update_portfolio_log_returns(self, action): +# self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + +# def current_price(self) -> float: +# return self.prices.iloc[self._current_tick].open - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open +# def prev_price(self) -> float: +# return self.prices.iloc[self._current_tick - 1].open - def sharpe_ratio(self) -> float: - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward +# def sharpe_ratio(self) -> float: +# if len(self.close_trade_profit) == 0: +# return 0. +# returns = np.array(self.close_trade_profit) +# reward = (np.mean(returns) - 0. 
+ 1e-9) / (np.std(returns) + 1e-9) +# return reward diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index a14111495..64d7061fc 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -1,6 +1,6 @@ import logging from enum import Enum -# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +from typing import Optional import gym import numpy as np @@ -44,14 +44,14 @@ class Base5ActionRLEnv(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - assert df.ndim == 2 self.rl_config = config['freqai']['rl_config'] self.id = id self.seed(seed) self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, + reward_kwargs: dict, starting_point=True): self.df = df self.signal_features = self.df self.prices = prices @@ -69,18 +69,18 @@ class Base5ActionRLEnv(gym.Env): low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None + self._start_tick: int = self.window_size + self._end_tick: int = len(self.prices) - 1 + self._done: bool = False + self._current_tick: int = self._start_tick + self._last_trade_tick: Optional[int] = None self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] + self._position_history: list = [None] + self.total_reward: float = 0 + self._total_profit: float = 0 + self._first_rendering: bool = False + self.history: dict = {} + self.trade_history: list = [] def seed(self, seed: int = 1): self.np_random, seed = seeding.np_random(seed) @@ -125,8 +125,7 @@ class Base5ActionRLEnv(gym.Env): self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position + if self.is_tradesignal(action): """ Action: Neutral, position: Long -> Close Long Action: Neutral, position: Short -> Close Short @@ -223,9 +222,8 @@ class Base5ActionRLEnv(gym.Env): # trade signal """ not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short + Determine if the signal is non sensical + e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Neutral.value and self._position == Positions.Short) or @@ -292,7 +290,7 @@ class Base5ActionRLEnv(gym.Env): def most_recent_return(self, action: int): """ - We support Long, Neutral and Short positions. + Calculate the tick to tick return if in a trade. Return is generated from rising prices in Long and falling prices in Short positions. The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
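For context, the observation handed to the agent each step is the feature window concatenated with the three state columns introduced earlier in this series (current profit, position, trade duration). A pandas-only sketch of that layout with synthetic numbers; none of the values below come from the patch:

import numpy as np
import pandas as pd

window_size, n_features = 10, 4
features = pd.DataFrame(np.random.randn(window_size, n_features),
                        columns=[f"%-feature_{i}" for i in range(n_features)])
state = pd.DataFrame(np.zeros((window_size, 3)),
                     columns=["current_profit_pct", "position", "trade_duration"],
                     index=features.index)
state["current_profit_pct"] = 0.015   # unrealized profit of the open trade
state["position"] = 1                 # Positions.Long.value
state["trade_duration"] = 12          # candles since entry
observation = pd.concat([features, state], axis=1)
assert observation.shape == (window_size, n_features + 3)  # matches the env's self.shape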
diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a9a1377a8..6660709bd 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -19,6 +19,7 @@ from typing import Callable from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym +from pathlib import Path logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -40,6 +41,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_env: Base5ActionRLEnv = None self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] + self.rl_config = self.freqai_info['rl_config'] + self.continual_retraining = self.rl_config['continual_retraining'] if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -68,7 +71,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): logger.info("--------------------Starting training " f"{pair} --------------------") - # filter the features requested by user in the configuration file and elegantly handle NaNs features_filtered, labels_filtered = dk.filter_features( unfiltered_dataframe, dk.training_features_list, @@ -78,19 +80,19 @@ class BaseReinforcementLearningModel(IFreqaiModel): data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods + dk.fit_labels() # FIXME useless for now, but just satiating append methods # normalize all data based on train_dataset only prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk) data_dictionary = dk.normalize_data(data_dictionary) - # optional additional data cleaning/analysis + # data cleaning/analysis self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk) @@ -100,9 +102,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): + def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame], + prices_train: DataFrame, prices_test: DataFrame, + dk: FreqaiDataKitchen): """ - User overrides this as shown here if they are using a custom MyRLEnv + User can override this if they are using a custom MyRLEnv """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -114,18 +118,22 @@ class BaseReinforcementLearningModel(IFreqaiModel): reward_kwargs=self.reward_params, config=self.config) self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config), ".") + reward_kwargs=self.reward_params, config=self.config), + str(Path(dk.data_path / 'monitor'))) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) + best_model_save_path=str(dk.data_path)) else: self.train_env.reset() 
self.eval_env.reset() self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) self.eval_env.reset_env(test_df, prices_test, self.CONV_WIDTH, self.reward_params) + # self.eval_callback.eval_env = self.eval_env + # self.eval_callback.best_model_save_path = str(dk.data_path) + # self.eval_callback._init_callback() self.eval_callback.__init__(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) + best_model_save_path=str(dk.data_path)) @abstractmethod def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -137,19 +145,20 @@ class BaseReinforcementLearningModel(IFreqaiModel): return - def get_state_info(self, pair): + def get_state_info(self, pair: str): open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 - current_profit = 0 + current_profit: float = 0 trade_duration = 0 for trade in open_trades: if trade.pair == pair: + # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) current_value = self.strategy.dp._exchange.get_rate( pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() - trade_duration = (now - trade.open_date.timestamp()) / self.base_tf_seconds - if 'long' in trade.enter_tag: + trade_duration = int((now - trade.open_date.timestamp()) / self.base_tf_seconds) + if 'long' in str(trade.enter_tag): market_side = 1 current_profit = (current_value - openrate) / openrate else: @@ -245,8 +254,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False, config={}) -> Callable: +def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, + reward_params: Dict[str, int], window_size: int, monitor: bool = False, + config: Dict[str, Any] = {}) -> Callable: """ Utility function for multiprocessed env. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py similarity index 95% rename from freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py rename to freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py index bb16b612b..fcd813ce6 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py +++ b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py @@ -22,6 +22,12 @@ class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): """ User can customize agent by defining the class and using it directly. Here the example is "TDQN" + + Warning! + This is an advanced example of how a user may create and use a highly + customized model class (which can inherit from existing classes, + similar to how the example below inherits from DQN). + This file is for example purposes only, and should not be run. 
""" def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -34,7 +40,7 @@ class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): # TDQN is a custom agent defined below model = TDQN(self.policy_type, self.train_env, - tensorboard_log=Path(dk.data_path / "tensorboard"), + tensorboard_log=str(Path(dk.data_path / "tensorboard")), policy_kwargs=policy_kwargs, **self.freqai_info['model_training_parameters'] ) @@ -217,7 +223,7 @@ class TDQN(DQN): exploration_initial_eps: float = 1.0, exploration_final_eps: float = 0.05, max_grad_norm: float = 10, - tensorboard_log: Optional[Path] = None, + tensorboard_log: Optional[str] = None, create_eval_env: bool = False, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 1, diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index c37973551..ae3e92f5e 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -485,6 +485,10 @@ class FreqaiDataDrawer: f"Unable to load model, ensure model exists at " f"{dk.data_path} " ) + # load it into ram if it was loaded from disk + if coin not in self.model_dictionary: + self.model_dictionary[coin] = model + if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: dk.pca = cloudpickle.load( open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb") diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 437b53b05..15a263b94 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -76,7 +76,8 @@ class ReinforcementLearningExample5ac(IStrategy): informative[f"%-{coin}pct-change"] = informative["close"].pct_change() informative[f"%-{coin}raw_volume"] = informative["volume"] - # The following features are necessary for RL models + # FIXME: add these outside the user strategy? + # The following columns are necessary for RL models. 
informative[f"%-{coin}raw_close"] = informative["close"] informative[f"%-{coin}raw_open"] = informative["open"] informative[f"%-{coin}raw_high"] = informative["high"] diff --git a/freqtrade/freqai/prediction_models/BaseClassifierModel.py b/freqtrade/freqai/prediction_models/BaseClassifierModel.py index 2edbf3b51..042f43199 100644 --- a/freqtrade/freqai/prediction_models/BaseClassifierModel.py +++ b/freqtrade/freqai/prediction_models/BaseClassifierModel.py @@ -57,9 +57,9 @@ class BaseClassifierModel(IFreqaiModel): self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') model = self.fit(data_dictionary) diff --git a/freqtrade/freqai/prediction_models/BaseRegressionModel.py b/freqtrade/freqai/prediction_models/BaseRegressionModel.py index 2ef175a2e..6ca9ae8cb 100644 --- a/freqtrade/freqai/prediction_models/BaseRegressionModel.py +++ b/freqtrade/freqai/prediction_models/BaseRegressionModel.py @@ -56,9 +56,9 @@ class BaseRegressionModel(IFreqaiModel): self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') model = self.fit(data_dictionary) diff --git a/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py b/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py index 04eff045f..6a842f007 100644 --- a/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py +++ b/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py @@ -53,9 +53,9 @@ class BaseTensorFlowModel(IFreqaiModel): self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') model = self.fit(data_dictionary) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index d3e6bde7c..254fd32b0 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -1,7 +1,6 @@ import logging -from typing import Any, Dict # , Tuple +from typing import Any, Dict -# import numpy.typing as npt import torch as th from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions @@ -22,12 +21,18 @@ class ReinforcementLearner(BaseReinforcementLearningModel): total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) + net_arch=[512, 512, 256]) - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.data_path / "tensorboard"), - **self.freqai_info['model_training_parameters'] - ) + if dk.pair not in self.dd.model_dictionary or not 
self.continual_retraining: + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.data_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + else: + logger.info('Continual training activated - starting training from previously ' + 'trained agent.') + model = self.dd.model_dictionary[dk.pair] + model.set_env(self.train_env) model.learn( total_timesteps=int(total_timesteps), From bd870e233128d655ac89a091c1aa6a8b2196c0d7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 16:32:14 +0200 Subject: [PATCH 055/421] fix monitor bug, set default values in case user doesnt set params --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++-- freqtrade/freqai/prediction_models/ReinforcementLearner.py | 3 ++- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 6660709bd..1bc3505e1 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -42,7 +42,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] - self.continual_retraining = self.rl_config['continual_retraining'] + self.continual_retraining = self.rl_config.get('continual_retraining', False) if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -289,7 +289,7 @@ class MyRLEnv(Base5ActionRLEnv): return 0. pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config['max_trade_duration_candles'] + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) trade_duration = self._current_tick - self._last_trade_tick factor = 1 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 254fd32b0..f7f016ab4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -32,6 +32,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): logger.info('Continual training activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] + model.tensorboard_log = Path(dk.data_path / "tensorboard") model.set_env(self.train_env) model.learn( @@ -61,7 +62,7 @@ class MyRLEnv(Base5ActionRLEnv): return 0. 
pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config['max_trade_duration_candles'] + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) trade_duration = self._current_tick - self._last_trade_tick factor = 1 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 17281e2d0..3a4c245aa 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -26,10 +26,10 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 512]) + net_arch=[512, 512, 256]) model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.data_path / "tensorboard"), + tensorboard_log=Path(dk.full_path / "tensorboard"), **self.freqai_info['model_training_parameters'] ) From a61821e1c6803ca82951e3e03df9fcb8cecbcc99 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 16:33:13 +0200 Subject: [PATCH 056/421] remove monitor log --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 1bc3505e1..0f0120365 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -118,8 +118,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): reward_kwargs=self.reward_params, config=self.config) self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config), - str(Path(dk.data_path / 'monitor'))) + reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, best_model_save_path=str(dk.data_path)) From d1bee29b1e5b01eb3465deea1b64968660e42b82 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 18:32:40 +0200 Subject: [PATCH 057/421] improve default reward, fix bugs in environment --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 40 ++++++++---- .../RL/BaseReinforcementLearningModel.py | 61 +++++++++---------- .../prediction_models/ReinforcementLearner.py | 54 +++++++++++++--- 3 files changed, 102 insertions(+), 53 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 64d7061fc..9f7c52c9c 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -140,30 +140,32 @@ class Base5ActionRLEnv(gym.Env): if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" + self._last_trade_tick = None elif action == Actions.Long_enter.value: self._position = Positions.Long trade_type = "long" + self._last_trade_tick = self._current_tick elif action == Actions.Short_enter.value: self._position = Positions.Short trade_type = "short" + self._last_trade_tick = self._current_tick elif action == Actions.Long_exit.value: self._position = Positions.Neutral trade_type = "neutral" + self._last_trade_tick = None elif action == Actions.Short_exit.value: self._position = Positions.Neutral trade_type = "neutral" + self._last_trade_tick = None else: print("case not defined") - # Update last trade 
tick - self._last_trade_tick = self._current_tick - if trade_type is not None: self.trade_history.append( {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - if self._total_profit < 0.2: + if self._total_profit < 0.5: self._done = True self._position_history.append(self._position) @@ -221,8 +223,7 @@ class Base5ActionRLEnv(gym.Env): def is_tradesignal(self, action: int): # trade signal """ - not trade signal is : - Determine if the signal is non sensical + Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or @@ -237,6 +238,24 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) + def _is_valid(self, action: int): + # trade signal + """ + Determine if the signal is valid. + e.g.: agent wants a Actions.Long_exit while it is in a Positions.short + """ + # Agent should only try to exit if it is in position + if action in (Actions.Short_exit.value, Actions.Long_exit.value): + if self._position not in (Positions.Short, Positions.Long): + return False + + # Agent should only try to enter if it is not in position + if action in (Actions.Short_enter.value, Actions.Long_enter.value): + if self._position != Positions.Neutral: + return False + + return True + def _is_trade(self, action: Actions): return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or (action == Actions.Short_enter.value and self._position == Positions.Neutral)) @@ -278,13 +297,8 @@ class Base5ActionRLEnv(gym.Env): if self._is_trade(action) or self._done: pnl = self.get_unrealized_profit() - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit * pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit * pnl + if self._position in (Positions.Long, Positions.Short): + self._total_profit *= (1 + pnl) self._profits.append((self._current_tick, self._total_profit)) self.close_trade_profit.append(pnl) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 0f0120365..84d19f269 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -19,7 +19,6 @@ from typing import Callable from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym -from pathlib import Path logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -112,27 +111,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - # environments - if not self.train_env: - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config)) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - 
best_model_save_path=str(dk.data_path)) - else: - self.train_env.reset() - self.eval_env.reset() - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(test_df, prices_test, self.CONV_WIDTH, self.reward_params) - # self.eval_callback.eval_env = self.eval_env - # self.eval_callback.best_model_save_path = str(dk.data_path) - # self.eval_callback._init_callback() - self.eval_callback.__init__(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=str(dk.data_path)) + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config)) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=str(dk.data_path)) @abstractmethod def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -284,30 +270,43 @@ class MyRLEnv(Base5ActionRLEnv): def calculate_reward(self, action): - if self._last_trade_tick is None: - return 0. + # first, penalize if the action is not valid + if not self._is_valid(action): + return -15 pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value): + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -15 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick - factor = 1 if trade_duration <= max_trade_duration: factor *= 1.5 elif trade_duration > max_trade_duration: factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long): + return -50 * trade_duration / max_trade_duration + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) return 0. 
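A quick numeric reading of the reshaped default reward above. Besides the fixed shaping terms (-15 for an invalid action or for idling while flat, +25 for entering a trade, and -50 * trade_duration / max_trade_duration for sitting in a position), the exit branches now scale sign(pnl) * (pnl + 1) by a base factor of 100. The standalone function below is an illustrative assumption that reproduces only those exit branches:

import numpy as np

def sketch_exit_reward(pnl, trade_duration, max_trade_duration=300,
                       profit_aim=0.02, rr=1.0, win_reward_factor=2.0):
    rew = np.sign(pnl) * (pnl + 1)
    factor = 100.0
    factor *= 1.5 if trade_duration <= max_trade_duration else 0.5
    if pnl > profit_aim * rr:
        factor *= win_reward_factor
    return float(rew * factor)

print(sketch_exit_reward(0.03, 100))   #  1.03 * 300 =  309.0 (fast win)
print(sketch_exit_reward(0.03, 500))   #  1.03 * 100 =  103.0 (slow win)
print(sketch_exit_reward(-0.02, 100))  # -0.98 * 150 = -147.0 (fast loss)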
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index f7f016ab4..2d1cafab5 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -6,6 +6,10 @@ from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from pathlib import Path +from pandas import DataFrame +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +import numpy as np logger = logging.getLogger(__name__) @@ -49,6 +53,25 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return model + def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame], + prices_train: DataFrame, prices_test: DataFrame, + dk: FreqaiDataKitchen): + """ + User can override this if they are using a custom MyRLEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config)) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=str(dk.data_path)) + class MyRLEnv(Base5ActionRLEnv): """ @@ -58,30 +81,43 @@ class MyRLEnv(Base5ActionRLEnv): def calculate_reward(self, action): - if self._last_trade_tick is None: - return 0. + # first, penalize if the action is not valid + if not self._is_valid(action): + return -15 pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value): + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -15 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick - factor = 1 if trade_duration <= max_trade_duration: factor *= 1.5 elif trade_duration > max_trade_duration: factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long): + return -50 * trade_duration / max_trade_duration + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) return 0. 
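The close-trade reward introduced above is the core of the new shaping: rew = np.sign(pnl) * (pnl + 1), scaled by a factor of 100 that is boosted (x1.5) for trades closed within max_trade_duration_candles, damped (x0.5) for overlong trades, and boosted again by win_reward_factor once pnl exceeds profit_aim * rr; flat terms handle the rest (a penalty for invalid actions, a bonus for entering, a penalty for idling). The standalone sketch below restates only the exit term so it can be read in isolation. The function name and flat arguments are illustrative only; inside the environment the same values come from reward_kwargs and rl_config.

import numpy as np


def exit_reward_sketch(pnl: float, trade_duration: int,
                       max_trade_duration: int = 300,
                       profit_aim: float = 0.02, rr: float = 1.0,
                       win_reward_factor: float = 2.0) -> float:
    # sign-preserving base reward: roughly +1 for a small win, -1 for a small loss
    rew = np.sign(pnl) * (pnl + 1)
    factor = 100.0
    # favour trades closed within the duration budget, damp overlong ones
    factor *= 1.5 if trade_duration <= max_trade_duration else 0.5
    # extra boost once the profit target (profit_aim * rr) is reached
    if pnl > profit_aim * rr:
        factor *= win_reward_factor
    return float(rew * factor)


# e.g. a 1% gain closed after 50 candles: exit_reward_sketch(0.01, 50) returns ~151.5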
From 94cfc8e63febe0590bae324f932cde390fc3a7a2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 25 Aug 2022 11:46:18 +0200 Subject: [PATCH 058/421] fix multiproc callback, add continual learning to multiproc, fix totalprofit bug in env, set eval_freq automatically, improve default reward --- config_examples/config_freqai-rl.example.json | 14 ++--- freqtrade/freqai/RL/Base3ActionRLEnv.py | 2 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 7 +-- .../RL/BaseReinforcementLearningModel.py | 24 ++++---- .../prediction_models/ReinforcementLearner.py | 16 +++--- .../ReinforcementLearner_multiproc.py | 57 +++++++++---------- 6 files changed, 58 insertions(+), 62 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index b3f8737be..e8852a0cf 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -56,9 +56,9 @@ "freqai": { "enabled": true, "model_save_type": "stable_baselines", - "conv_width": 10, + "conv_width": 4, "purge_old_models": true, - "train_period_days": 10, + "train_period_days": 5, "backtest_period_days": 2, "identifier": "unique-id", "data_kitchen_thread_count": 2, @@ -72,7 +72,7 @@ "30m" ], "indicator_max_period_candles": 10, - "indicator_periods_candles": [5, 10] + "indicator_periods_candles": [5] }, "data_split_parameters": { "test_size": 0.5, @@ -85,13 +85,13 @@ "verbose": 1 }, "rl_config": { - "train_cycles": 3, - "eval_cycles": 3, + "train_cycles": 6, "thread_count": 4, - "max_trade_duration_candles": 100, + "max_trade_duration_candles": 300, "model_type": "PPO", "policy_type": "MlpPolicy", - "continual_retraining": true, + "continual_learning": false, + "max_training_drawdown_pct": 0.5, "model_reward_parameters": { "rr": 1, "profit_aim": 0.02, diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index cddd2f6f9..fe51d3b13 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -1,3 +1,5 @@ +# Example of a 3 action environment. + # import logging # from enum import Enum diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 9f7c52c9c..b93d6e6ff 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -77,8 +77,7 @@ class Base5ActionRLEnv(gym.Env): self._position = Positions.Neutral self._position_history: list = [None] self.total_reward: float = 0 - self._total_profit: float = 0 - self._first_rendering: bool = False + self._total_profit: float = 1 self.history: dict = {} self.trade_history: list = [] @@ -101,7 +100,6 @@ class Base5ActionRLEnv(gym.Env): self.total_reward = 0. self._total_profit = 1. # unit - self._first_rendering = True self.history = {} self.trade_history = [] self.portfolio_log_returns = np.zeros(len(self.prices)) @@ -165,7 +163,7 @@ class Base5ActionRLEnv(gym.Env): {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - if self._total_profit < 0.5: + if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8): self._done = True self._position_history.append(self._position) @@ -293,7 +291,6 @@ class Base5ActionRLEnv(gym.Env): return 0. 
def _update_profit(self, action): - # if self._is_trade(action) or self._done: if self._is_trade(action) or self._done: pnl = self.get_unrealized_profit() diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 84d19f269..7a524ba87 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -23,8 +23,8 @@ logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') -SB3_MODELS = ['PPO', 'A2C', 'DQN', 'TD3', 'SAC'] -SB3_CONTRIB_MODELS = ['TRPO', 'ARS'] +SB3_MODELS = ['PPO', 'A2C', 'DQN'] +SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO'] class BaseReinforcementLearningModel(IFreqaiModel): @@ -41,7 +41,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] - self.continual_retraining = self.rl_config.get('continual_retraining', False) + self.continual_learning = self.rl_config.get('continual_learning', False) if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -109,7 +109,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config) @@ -117,7 +116,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, + render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @abstractmethod @@ -138,6 +137,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): for trade in open_trades: if trade.pair == pair: # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) + # FIXME: get_rate and trade_udration shouldn't work with backtesting, + # we need to use candle dates and prices to compute that. 
current_value = self.strategy.dp._exchange.get_rate( pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate @@ -256,7 +257,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: Data env = MyRLEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config) if monitor: - env = Monitor(env, ".") + env = Monitor(env) return env set_random_seed(seed) return _init @@ -272,18 +273,19 @@ class MyRLEnv(Base5ActionRLEnv): # first, penalize if the action is not valid if not self._is_valid(action): - return -15 + return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) factor = 100 # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value): + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -15 + return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick @@ -294,8 +296,8 @@ class MyRLEnv(Base5ActionRLEnv): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long): - return -50 * trade_duration / max_trade_duration + if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2d1cafab5..36cc821e4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -27,7 +27,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 256]) - if dk.pair not in self.dd.model_dictionary or not self.continual_retraining: + if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, tensorboard_log=Path(dk.data_path / "tensorboard"), **self.freqai_info['model_training_parameters'] @@ -61,7 +61,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config) @@ -69,7 +68,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, + render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -83,18 +82,19 @@ class MyRLEnv(Base5ActionRLEnv): # first, penalize if the action is not valid if not self._is_valid(action): - return -15 + return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) factor = 100 # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value): + if action in 
(Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -15 + return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick @@ -105,8 +105,8 @@ class MyRLEnv(Base5ActionRLEnv): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long): - return -50 * trade_duration / max_trade_duration + if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 3a4c245aa..7e8141b23 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -26,12 +26,19 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 256]) + net_arch=[256, 256]) - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.full_path / "tensorboard"), - **self.freqai_info['model_training_parameters'] - ) + if dk.pair not in self.dd.model_dictionary or not self.continual_learning: + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.full_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + else: + logger.info('Continual training activated - starting training from previously ' + 'trained agent.') + model = self.dd.model_dictionary[dk.pair] + model.tensorboard_log = Path(dk.data_path / "tensorboard") + model.set_env(self.train_env) model.learn( total_timesteps=int(total_timesteps), @@ -57,30 +64,18 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - # environments - if not self.train_env: - env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, - config=self.config) for i - in range(num_cpu)]) + env_id = "train_env" + num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH, + config=self.config) for i + in range(num_cpu)]) - eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config) for i - in range(num_cpu)]) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) - else: - self.train_env.env_method('reset') - self.eval_env.env_method('reset') - self.train_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.eval_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - 
self.eval_callback.__init__(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True, + config=self.config) for i + in range(num_cpu)]) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) From 05ccebf9a16ac2059f7f0bbdde7f4f1e4bd0bcb4 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 25 Aug 2022 12:29:48 +0200 Subject: [PATCH 059/421] automate eval freq in multiproc --- .../freqai/prediction_models/ReinforcementLearner_multiproc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 7e8141b23..18a843b6d 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -62,7 +62,6 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) env_id = "train_env" num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) @@ -77,5 +76,5 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): config=self.config) for i in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, + render=False, eval_freq=len(train_df), best_model_save_path=dk.data_path) From 3199eb453b2a855ae4949fefe586583e941b3235 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 25 Aug 2022 19:05:51 +0200 Subject: [PATCH 060/421] reduce code for base use-case, ensure multiproc inherits custom env, add ability to limit ram use. 
--- config_examples/config_freqai-rl.example.json | 1 + .../RL/BaseReinforcementLearningModel.py | 129 ++++++++++-------- freqtrade/freqai/data_drawer.py | 9 +- .../prediction_models/ReinforcementLearner.py | 102 ++++++-------- .../ReinforcementLearner_multiproc.py | 7 +- 5 files changed, 125 insertions(+), 123 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index e8852a0cf..dc7c62e4a 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -58,6 +58,7 @@ "model_save_type": "stable_baselines", "conv_width": 4, "purge_old_models": true, + "limit_ram_usage": false, "train_period_days": 5, "backtest_period_days": 2, "identifier": "unique-id", diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 7a524ba87..5a7ae4372 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -19,6 +19,7 @@ from typing import Callable from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym +from pathlib import Path logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -110,9 +111,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config) + self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, @@ -126,7 +127,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): go in here. Abstract method, so this function must be overridden by user class. """ - return def get_state_info(self, pair: str): @@ -232,6 +232,72 @@ class BaseReinforcementLearningModel(IFreqaiModel): return prices_train, prices_test + def load_model_from_disk(self, dk: FreqaiDataKitchen) -> Any: + """ + Can be used by user if they are trying to limit_ram_usage *and* + perform continual learning. + For now, this is unused. + """ + exists = Path(dk.data_path / f"{dk.model_filename}_model").is_file() + if exists: + model = self.MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") + else: + logger.info('No model file on disk to continue learning from.') + + return model + + # Nested class which can be overridden by user to customize further + class MyRLEnv(Base5ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. 
+ """ + + def calculate_reward(self, action): + + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + return 0. + # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor # all the other existing fit() functions to include dk argument. For now we instantiate and @@ -240,7 +306,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, +def make_env(MyRLEnv: Base5ActionRLEnv, env_id: str, rank: int, + seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, config: Dict[str, Any] = {}) -> Callable: """ @@ -252,6 +319,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: Data :param rank: (int) index of the subprocess :return: (Callable) """ + def _init() -> gym.Env: env = MyRLEnv(df=train_df, prices=price, window_size=window_size, @@ -261,54 +329,3 @@ def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: Data return env set_random_seed(seed) return _init - - -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - sets a custom reward based on profit and trade duration. 
- """ - - def calculate_reward(self, action): - - # first, penalize if the action is not valid - if not self._is_valid(action): - return -2 - - pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) - factor = 100 - - # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: - return 25 - # discourage agent from not entering trades - if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -1 - - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick - - if trade_duration <= max_trade_duration: - factor *= 1.5 - elif trade_duration > max_trade_duration: - factor *= 0.5 - - # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: - return -1 * trade_duration / max_trade_duration - - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - return 0. diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index ae3e92f5e..64a5502ad 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -90,6 +90,7 @@ class FreqaiDataDrawer: self.empty_pair_dict: pair_info = { "model_filename": "", "trained_timestamp": 0, "priority": 1, "first": True, "data_path": "", "extras": {}} + self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) def load_drawer_from_disk(self): """ @@ -423,8 +424,8 @@ class FreqaiDataDrawer: dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb") ) - # if self.live: - self.model_dictionary[coin] = model + if not self.limit_ram_use: + self.model_dictionary[coin] = model self.pair_dict[coin]["model_filename"] = dk.model_filename self.pair_dict[coin]["data_path"] = str(dk.data_path) self.save_drawer_to_disk() @@ -464,7 +465,7 @@ class FreqaiDataDrawer: model_type = self.freqai_info.get('model_save_type', 'joblib') # try to access model in memory instead of loading object from disk to save time - if dk.live and coin in self.model_dictionary: + if dk.live and coin in self.model_dictionary and not self.limit_ram_use: model = self.model_dictionary[coin] elif model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") @@ -486,7 +487,7 @@ class FreqaiDataDrawer: ) # load it into ram if it was loaded from disk - if coin not in self.model_dictionary: + if coin not in self.model_dictionary and not self.limit_ram_use: self.model_dictionary[coin] = model if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 36cc821e4..a72a56e20 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -3,12 +3,12 @@ from typing import Any, Dict import torch as th from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from 
freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from pathlib import Path -from pandas import DataFrame -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor +# from pandas import DataFrame +# from stable_baselines3.common.callbacks import EvalCallback +# from stable_baselines3.common.monitor import Monitor import numpy as np logger = logging.getLogger(__name__) @@ -53,71 +53,53 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return model - def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame], - prices_train: DataFrame, prices_test: DataFrame, - dk: FreqaiDataKitchen): + class MyRLEnv(BaseReinforcementLearningModel.MyRLEnv): """ - User can override this if they are using a custom MyRLEnv + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config)) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=len(train_df), - best_model_save_path=str(dk.data_path)) + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - sets a custom reward based on profit and trade duration. 
- """ + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 - def calculate_reward(self, action): + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 - # first, penalize if the action is not valid - if not self._is_valid(action): - return -2 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick - pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) - factor = 100 + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 - # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: - return 25 - # discourage agent from not entering trades - if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -1 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) - if trade_duration <= max_trade_duration: - factor *= 1.5 - elif trade_duration > max_trade_duration: - factor *= 0.5 + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) - # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: - return -1 * trade_duration / max_trade_duration - - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - return 0. + return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 18a843b6d..f301da981 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -34,7 +34,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): **self.freqai_info['model_training_parameters'] ) else: - logger.info('Continual training activated - starting training from previously ' + logger.info('Continual learning activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] model.tensorboard_log = Path(dk.data_path / "tensorboard") @@ -65,13 +65,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, config=self.config) for i in range(num_cpu)]) eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, + test_df, prices_test, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i in range(num_cpu)]) From d31926efdf9d3eafea87aa3b334de4d389c7308f Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Thu, 25 Aug 2022 21:40:16 +0200 Subject: [PATCH 061/421] Added Base4Action --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 346 ++++++++++++++++++++++++ 1 file changed, 346 insertions(+) create mode 100644 freqtrade/freqai/RL/Base4ActionRLEnv.py diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py new file mode 100644 index 000000000..478507639 --- /dev/null +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -0,0 +1,346 @@ +import logging +from enum import Enum +from typing import Optional + +import gym +import numpy as np +from gym import spaces +from gym.utils import seeding +from pandas import DataFrame +import pandas as pd +from abc import abstractmethod +logger = logging.getLogger(__name__) + + +class Actions(Enum): + Neutral = 0 + Exit = 1 + Long_enter = 2 + Short_enter = 3 + + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + + +class Base4ActionRLEnv(gym.Env): + """ + Base class for a 5 action environment + """ + metadata = {'render.modes': ['human']} + + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1, config: dict = {}): + + self.rl_config = config['freqai']['rl_config'] + self.id = id + self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, + reward_kwargs: dict, starting_point=True): + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + 
self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1] + 3) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick: int = self.window_size + self._end_tick: int = len(self.prices) - 1 + self._done: bool = False + self._current_tick: int = self._start_tick + self._last_trade_tick: Optional[int] = None + self._position = Positions.Neutral + self._position_history: list = [None] + self.total_reward: float = 0 + self._total_profit: float = 1 + self.history: dict = {} + self.trade_history: list = [] + + def seed(self, seed: int = 1): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + + return self._get_observation() + + def step(self, action: int): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self.calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + self._last_trade_tick = None + elif action == Actions.Long_enter.value: + self._position = Positions.Long + trade_type = "long" + self._last_trade_tick = self._current_tick + elif action == Actions.Short_enter.value: + self._position = Positions.Short + trade_type = "short" + self._last_trade_tick = self._current_tick + elif action == Actions.Exit.value: + self._position = Positions.Neutral + trade_type = "neutral" + self._last_trade_tick = None + elif action == Actions.Exit.value: + self._position = Positions.Neutral + trade_type = "neutral" + self._last_trade_tick = None + else: + print("case not defined") + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8): + self._done = True + + self._position_history.append(self._position) + + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + + observation = self._get_observation() + + self._update_history(info) + + return observation, step_reward, self._done, info + + def _get_observation(self): + features_window = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state = 
DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', 'position', 'trade_duration'], + index=features_window.index) + + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state + + def get_trade_duration(self): + if self._last_trade_tick is None: + return 0 + else: + return self._current_tick - self._last_trade_tick + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. + + def is_tradesignal(self, action: int): + # trade signal + """ + Determine if the signal is a trade signal + e.g.: agent wants a Actions.Long_exit while it is in a Positions.short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Short_enter.value and self._position == Positions.Long) or + (action == Actions.Exit.value and self._position == Positions.Neutral) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or + (action == Actions.Long_enter.value and self._position == Positions.Short)) + + def _is_valid(self, action: int): + # trade signal + """ + Determine if the signal is valid. 
+ e.g.: agent wants a Actions.Long_exit while it is in a Positions.short + """ + # Agent should only try to exit if it is in position + if action in (Actions.Exit.value): + if self._position not in (Positions.Short, Positions.Long): + return False + + # Agent should only try to enter if it is not in position + if action in (Actions.Short_enter.value, Actions.Long_enter.value): + if self._position != Positions.Neutral: + return False + + return True + + def _is_trade(self, action: Actions): + return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or + (action == Actions.Short_enter.value and self._position == Positions.Neutral)) + + def is_hold(self, action): + return ((action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Neutral)) + + def add_entry_fee(self, price): + return price * (1 + self.fee) + + def add_exit_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + @abstractmethod + def calculate_reward(self, action): + """ + Reward is created by BaseReinforcementLearningModel and can + be inherited/edited by the user made ReinforcementLearner file. + """ + + return 0. + + def _update_profit(self, action): + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position in (Positions.Long, Positions.Short): + self._total_profit *= (1 + pnl) + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action: int): + """ + Calculate the tick to tick return if in a trade. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_entry_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_exit_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward From cdc550da9a40ed8e46150bb2c9780e81147fb3b8 Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Fri, 26 Aug 2022 09:59:17 +0200 Subject: [PATCH 062/421] Revert the docker changes to be inline with the original freqtrade image Reverted the changes, and added a new way of doing, Dockerfile.freqai with that file the users can make their own dockerimage. 
--- Dockerfile | 2 +- docker/Dockerfile.freqai | 59 +++++++++++++++++++++++++++++++++++---- requirements-freqai.txt | 2 +- requirements-hyperopt.txt | 1 - 4 files changed, 56 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index d06b53202..d37555cd8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,7 +33,7 @@ RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* ENV LD_LIBRARY_PATH /usr/local/lib # Install dependencies -COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt requirements-freqai.txt /freqtrade/ +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ USER ftuser RUN pip install --user --no-cache-dir numpy \ && pip install --user --no-cache-dir -r requirements-hyperopt.txt diff --git a/docker/Dockerfile.freqai b/docker/Dockerfile.freqai index 9a2f75700..af9da4c25 100644 --- a/docker/Dockerfile.freqai +++ b/docker/Dockerfile.freqai @@ -1,9 +1,58 @@ -ARG sourceimage=freqtradeorg/freqtrade -ARG sourcetag=develop -FROM ${sourceimage}:${sourcetag} +FROM python:3.10.6-slim-bullseye as base + +# Setup env +ENV LANG C.UTF-8 +ENV LC_ALL C.UTF-8 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONFAULTHANDLER 1 +ENV PATH=/home/ftuser/.local/bin:$PATH +ENV FT_APP_ENV="docker" + +# Prepare environment +RUN mkdir /freqtrade \ + && apt-get update \ + && apt-get -y install sudo libatlas3-base curl sqlite3 libhdf5-serial-dev \ + && apt-get clean \ + && useradd -u 1000 -G sudo -U -m -s /bin/bash ftuser \ + && chown ftuser:ftuser /freqtrade \ + # Allow sudoers + && echo "ftuser ALL=(ALL) NOPASSWD: /bin/chown" >> /etc/sudoers + +WORKDIR /freqtrade # Install dependencies -COPY requirements-freqai.txt /freqtrade/ +FROM base as python-deps +RUN apt-get update \ + && apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \ + && apt-get clean \ + && pip install --upgrade pip -RUN pip install -r requirements-freqai.txt --user --no-cache-dir +# Install TA-lib +COPY build_helpers/* /tmp/ +RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* +ENV LD_LIBRARY_PATH /usr/local/lib +# Install dependencies +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt requirements-freqai.txt /freqtrade/ +USER ftuser +RUN pip install --user --no-cache-dir numpy \ + && pip install --user --no-cache-dir -r requirements-freqai.txt + +# Copy dependencies to runtime-image +FROM base as runtime-image +COPY --from=python-deps /usr/local/lib /usr/local/lib +ENV LD_LIBRARY_PATH /usr/local/lib + +COPY --from=python-deps --chown=ftuser:ftuser /home/ftuser/.local /home/ftuser/.local + +USER ftuser +# Install and execute +COPY --chown=ftuser:ftuser . /freqtrade/ + +RUN pip install -e . --user --no-cache-dir --no-build-isolation \ + && mkdir /freqtrade/user_data/ \ + && freqtrade install-ui + +ENTRYPOINT ["freqtrade"] +# Default to trade mode +CMD [ "trade" ] diff --git a/requirements-freqai.txt b/requirements-freqai.txt index de1b6670a..aebce1fae 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -1,5 +1,5 @@ # Include all requirements to run the bot. 
--r requirements.txt +-r requirements-hyperopt.txt # Required for freqai scikit-learn==1.1.2 diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index e19eb27c1..020ccdda8 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -7,4 +7,3 @@ scikit-learn==1.1.2 scikit-optimize==0.9.0 filelock==3.8.0 progressbar2==4.0.0 --r requirements-freqai.txt \ No newline at end of file From baa4f8e3d0d7f4d1dcb950b799da749171365989 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 26 Aug 2022 11:03:17 +0200 Subject: [PATCH 063/421] remove Base3ActionEnv in favor of Base4Action --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 332 ------------------------ 1 file changed, 332 deletions(-) delete mode 100644 freqtrade/freqai/RL/Base3ActionRLEnv.py diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py deleted file mode 100644 index fe51d3b13..000000000 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ /dev/null @@ -1,332 +0,0 @@ -# Example of a 3 action environment. - -# import logging -# from enum import Enum - -# import gym -# import numpy as np -# import pandas as pd -# from gym import spaces -# from gym.utils import seeding -# from pandas import DataFrame - - -# # from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union - -# logger = logging.getLogger(__name__) - - -# class Actions(Enum): -# Short = 0 -# Long = 1 -# Neutral = 2 - - -# class Positions(Enum): -# Short = 0 -# Long = 1 -# Neutral = 0.5 - -# def opposite(self): -# return Positions.Short if self == Positions.Long else Positions.Long - - -# def mean_over_std(x): -# std = np.std(x, ddof=1) -# mean = np.mean(x) -# return mean / std if std > 0 else 0 - - -# class Base3ActionRLEnv(gym.Env): - -# metadata = {'render.modes': ['human']} - -# def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), -# reward_kwargs: dict = {}, window_size=10, starting_point=True, -# id: str = 'baseenv-1', seed: int = 1): -# assert df.ndim == 2 - -# self.id = id -# self.seed(seed) -# self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - -# def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): -# self.df = df -# self.signal_features = self.df -# self.prices = prices -# self.window_size = window_size -# self.starting_point = starting_point -# self.rr = reward_kwargs["rr"] -# self.profit_aim = reward_kwargs["profit_aim"] - -# self.fee = 0.0015 - -# # # spaces -# self.shape = (window_size, self.signal_features.shape[1] + 2) -# self.action_space = spaces.Discrete(len(Actions)) -# self.observation_space = spaces.Box( -# low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - -# # episode -# self._start_tick = self.window_size -# self._end_tick = len(self.prices) - 1 -# self._done = None -# self._current_tick = None -# self._last_trade_tick = None -# self._position = Positions.Neutral -# self._position_history = None -# self.total_reward = None -# self._total_profit = None -# self._first_rendering = None -# self.history = None -# self.trade_history = [] - -# def seed(self, seed: int = 1): -# self.np_random, seed = seeding.np_random(seed) -# return [seed] - -# def reset(self): - -# self._done = False - -# if self.starting_point is True: -# self._position_history = (self._start_tick * [None]) + [self._position] -# else: -# self._position_history = (self.window_size * [None]) + [self._position] - -# self._current_tick = self._start_tick -# self._last_trade_tick = None -# self._position = Positions.Neutral - -# 
self.total_reward = 0. -# self._total_profit = 1. # unit -# self._first_rendering = True -# self.history = {} -# self.trade_history = [] -# self.portfolio_log_returns = np.zeros(len(self.prices)) - -# self._profits = [(self._start_tick, 1)] -# self.close_trade_profit = [] - -# return self._get_observation() - -# def step(self, action: int): -# self._done = False -# self._current_tick += 1 - -# if self._current_tick == self._end_tick: -# self._done = True - -# self.update_portfolio_log_returns(action) - -# self._update_profit(action) -# step_reward = self.calculate_reward(action) -# self.total_reward += step_reward - -# trade_type = None -# if self.is_tradesignal(action): # exclude 3 case not trade -# # Update position -# """ -# Action: Neutral, position: Long -> Close Long -# Action: Neutral, position: Short -> Close Short - -# Action: Long, position: Neutral -> Open Long -# Action: Long, position: Short -> Close Short and Open Long - -# Action: Short, position: Neutral -> Open Short -# Action: Short, position: Long -> Close Long and Open Short -# """ - -# if action == Actions.Neutral.value: -# self._position = Positions.Neutral -# trade_type = "neutral" -# elif action == Actions.Long.value: -# self._position = Positions.Long -# trade_type = "long" -# elif action == Actions.Short.value: -# self._position = Positions.Short -# trade_type = "short" -# else: -# print("case not defined") - -# # Update last trade tick -# self._last_trade_tick = self._current_tick - -# if trade_type is not None: -# self.trade_history.append( -# {'price': self.current_price(), 'index': self._current_tick, -# 'type': trade_type}) - -# if self._total_profit < 0.2: -# self._done = True - -# self._position_history.append(self._position) -# observation = self._get_observation() -# info = dict( -# tick=self._current_tick, -# total_reward=self.total_reward, -# total_profit=self._total_profit, -# position=self._position.value -# ) -# self._update_history(info) - -# return observation, step_reward, self._done, info - -# def _get_observation(self): -# features_window = self.signal_features[( -# self._current_tick - self.window_size):self._current_tick] -# features_and_state = DataFrame(np.zeros((len(features_window), 2)), -# columns=['current_profit_pct', 'position'], -# index=features_window.index) - -# features_and_state['current_profit_pct'] = self.get_unrealized_profit() -# features_and_state['position'] = self._position.value -# features_and_state = pd.concat([features_window, features_and_state], axis=1) -# return features_and_state - -# def get_unrealized_profit(self): - -# if self._last_trade_tick is None: -# return 0. - -# if self._position == Positions.Neutral: -# return 0. -# elif self._position == Positions.Short: -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# return (last_trade_price - current_price) / last_trade_price -# elif self._position == Positions.Long: -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# return (current_price - last_trade_price) / last_trade_price -# else: -# return 0. 
- -# def is_tradesignal(self, action: int): -# # trade signal -# """ -# not trade signal is : -# Action: Neutral, position: Neutral -> Nothing -# Action: Long, position: Long -> Hold Long -# Action: Short, position: Short -> Hold Short -# """ -# return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) -# or (action == Actions.Short.value and self._position == Positions.Short) -# or (action == Actions.Long.value and self._position == Positions.Long)) - -# def _is_trade(self, action: Actions): -# return ((action == Actions.Long.value and self._position == Positions.Short) or -# (action == Actions.Short.value and self._position == Positions.Long) or -# (action == Actions.Neutral.value and self._position == Positions.Long) or -# (action == Actions.Neutral.value and self._position == Positions.Short) -# ) - -# def is_hold(self, action): -# return ((action == Actions.Short.value and self._position == Positions.Short) -# or (action == Actions.Long.value and self._position == Positions.Long)) - -# def add_buy_fee(self, price): -# return price * (1 + self.fee) - -# def add_sell_fee(self, price): -# return price / (1 + self.fee) - -# def _update_history(self, info): -# if not self.history: -# self.history = {key: [] for key in info.keys()} - -# for key, value in info.items(): -# self.history[key].append(value) - -# def get_sharpe_ratio(self): -# return mean_over_std(self.get_portfolio_log_returns()) - -# def calculate_reward(self, action): - -# if self._last_trade_tick is None: -# return 0. - -# # close long -# if (action == Actions.Short.value or -# action == Actions.Neutral.value) and self._position == Positions.Long: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(current_price) - np.log(last_trade_price)) - -# # close short -# if (action == Actions.Long.value or -# action == Actions.Neutral.value) and self._position == Positions.Short: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(last_trade_price) - np.log(current_price)) - -# return 0. - -# def _update_profit(self, action): -# if self._is_trade(action) or self._done: -# pnl = self.get_unrealized_profit() - -# if self._position == Positions.Long: -# self._total_profit = self._total_profit + self._total_profit * pnl -# self._profits.append((self._current_tick, self._total_profit)) -# self.close_trade_profit.append(pnl) - -# if self._position == Positions.Short: -# self._total_profit = self._total_profit + self._total_profit * pnl -# self._profits.append((self._current_tick, self._total_profit)) -# self.close_trade_profit.append(pnl) - -# def most_recent_return(self, action: int): -# """ -# We support Long, Neutral and Short positions. -# Return is generated from rising prices in Long -# and falling prices in Short positions. -# The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
-# """ -# # Long positions -# if self._position == Positions.Long: -# current_price = self.prices.iloc[self._current_tick].open -# if action == Actions.Short.value or action == Actions.Neutral.value: -# current_price = self.add_sell_fee(current_price) - -# previous_price = self.prices.iloc[self._current_tick - 1].open - -# if (self._position_history[self._current_tick - 1] == Positions.Short -# or self._position_history[self._current_tick - 1] == Positions.Neutral): -# previous_price = self.add_buy_fee(previous_price) - -# return np.log(current_price) - np.log(previous_price) - -# # Short positions -# if self._position == Positions.Short: -# current_price = self.prices.iloc[self._current_tick].open -# if action == Actions.Long.value or action == Actions.Neutral.value: -# current_price = self.add_buy_fee(current_price) - -# previous_price = self.prices.iloc[self._current_tick - 1].open -# if (self._position_history[self._current_tick - 1] == Positions.Long -# or self._position_history[self._current_tick - 1] == Positions.Neutral): -# previous_price = self.add_sell_fee(previous_price) - -# return np.log(previous_price) - np.log(current_price) - -# return 0 - -# def get_portfolio_log_returns(self): -# return self.portfolio_log_returns[1:self._current_tick + 1] - -# def update_portfolio_log_returns(self, action): -# self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - -# def current_price(self) -> float: -# return self.prices.iloc[self._current_tick].open - -# def prev_price(self) -> float: -# return self.prices.iloc[self._current_tick - 1].open - -# def sharpe_ratio(self) -> float: -# if len(self.close_trade_profit) == 0: -# return 0. -# returns = np.array(self.close_trade_profit) -# reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) -# return reward From 8c313b431d9c1094e588acf179152a944ca84de0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 26 Aug 2022 11:14:01 +0200 Subject: [PATCH 064/421] remove whitespace from Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d37555cd8..14a67edc8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,7 +33,7 @@ RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* ENV LD_LIBRARY_PATH /usr/local/lib # Install dependencies -COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ USER ftuser RUN pip install --user --no-cache-dir numpy \ && pip install --user --no-cache-dir -r requirements-hyperopt.txt From 7766350c1558ae257cc540a22dadc7aefcafe384 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 28 Aug 2022 19:21:57 +0200 Subject: [PATCH 065/421] refactor environment inheritence tree to accommodate flexible action types/counts. 
fix bug in train profit handling --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 234 +-------------- freqtrade/freqai/RL/Base5ActionRLEnv.py | 187 +----------- freqtrade/freqai/RL/BaseEnvironment.py | 270 ++++++++++++++++++ .../RL/BaseReinforcementLearningModel.py | 35 +-- .../RL/ReinforcementLearnerCustomAgent.py | 23 +- freqtrade/freqai/freqai_interface.py | 2 +- .../prediction_models/ReinforcementLearner.py | 17 +- .../ReinforcementLearner_multiproc.py | 11 +- 8 files changed, 339 insertions(+), 440 deletions(-) create mode 100644 freqtrade/freqai/RL/BaseEnvironment.py diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 478507639..ef5b1c107 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -1,14 +1,11 @@ import logging from enum import Enum -from typing import Optional -import gym -import numpy as np from gym import spaces -from gym.utils import seeding -from pandas import DataFrame -import pandas as pd -from abc import abstractmethod + +from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions + + logger = logging.getLogger(__name__) @@ -19,95 +16,13 @@ class Actions(Enum): Short_enter = 3 - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - - -class Base4ActionRLEnv(gym.Env): +class Base4ActionRLEnv(BaseEnvironment): """ - Base class for a 5 action environment + Base class for a 4 action environment """ - metadata = {'render.modes': ['human']} - def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), - reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - - self.rl_config = config['freqai']['rl_config'] - self.id = id - self.seed(seed) - self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - - def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, - reward_kwargs: dict, starting_point=True): - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee = 0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 3) + def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick: int = self.window_size - self._end_tick: int = len(self.prices) - 1 - self._done: bool = False - self._current_tick: int = self._start_tick - self._last_trade_tick: Optional[int] = None - self._position = Positions.Neutral - self._position_history: list = [None] - self.total_reward: float = 0 - self._total_profit: float = 1 - self.history: dict = {} - self.trade_history: list = [] - - def seed(self, seed: int = 1): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = 
Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - - return self._get_observation() def step(self, action: int): self._done = False @@ -181,43 +96,6 @@ class Base4ActionRLEnv(gym.Env): return observation, step_reward, self._done, info - def _get_observation(self): - features_window = self.signal_features[( - self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 3)), - columns=['current_profit_pct', 'position', 'trade_duration'], - index=features_window.index) - - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state['trade_duration'] = self.get_trade_duration() - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state - - def get_trade_duration(self): - if self._last_trade_tick is None: - return 0 - else: - return self._current_tick - self._last_trade_tick - - def get_unrealized_profit(self): - - if self._last_trade_tick is None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. 
- def is_tradesignal(self, action: int): # trade signal """ @@ -228,7 +106,7 @@ class Base4ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Short_enter.value and self._position == Positions.Long) or + (action == Actions.Short_enter.value and self._position == Positions.Long) or (action == Actions.Exit.value and self._position == Positions.Neutral) or (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short)) @@ -240,7 +118,7 @@ class Base4ActionRLEnv(gym.Env): e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ # Agent should only try to exit if it is in position - if action in (Actions.Exit.value): + if action == Actions.Exit.value: if self._position not in (Positions.Short, Positions.Long): return False @@ -250,97 +128,3 @@ class Base4ActionRLEnv(gym.Env): return False return True - - def _is_trade(self, action: Actions): - return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or - (action == Actions.Short_enter.value and self._position == Positions.Neutral)) - - def is_hold(self, action): - return ((action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Long_enter.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - (action == Actions.Neutral.value and self._position == Positions.Neutral)) - - def add_entry_fee(self, price): - return price * (1 + self.fee) - - def add_exit_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - @abstractmethod - def calculate_reward(self, action): - """ - Reward is created by BaseReinforcementLearningModel and can - be inherited/edited by the user made ReinforcementLearner file. - """ - - return 0. - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position in (Positions.Long, Positions.Short): - self._total_profit *= (1 + pnl) - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action: int): - """ - Calculate the tick to tick return if in a trade. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
- """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_entry_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_exit_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index b93d6e6ff..e0a38f9d1 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -1,14 +1,14 @@ import logging from enum import Enum -from typing import Optional -import gym import numpy as np -from gym import spaces -from gym.utils import seeding -from pandas import DataFrame import pandas as pd -from abc import abstractmethod +from gym import spaces +from pandas import DataFrame + +from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions + + logger = logging.getLogger(__name__) @@ -20,70 +20,19 @@ class Actions(Enum): Short_exit = 4 -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - - def mean_over_std(x): std = np.std(x, ddof=1) mean = np.mean(x) return mean / std if std > 0 else 0 -class Base5ActionRLEnv(gym.Env): +class Base5ActionRLEnv(BaseEnvironment): """ Base class for a 5 action environment """ - metadata = {'render.modes': ['human']} - def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), - reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - - self.rl_config = config['freqai']['rl_config'] - self.id = id - self.seed(seed) - self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - - def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, - reward_kwargs: dict, starting_point=True): - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee = 0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 3) + def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) - 
self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick: int = self.window_size - self._end_tick: int = len(self.prices) - 1 - self._done: bool = False - self._current_tick: int = self._start_tick - self._last_trade_tick: Optional[int] = None - self._position = Positions.Neutral - self._position_history: list = [None] - self.total_reward: float = 0 - self._total_profit: float = 1 - self.history: dict = {} - self.trade_history: list = [] - - def seed(self, seed: int = 1): - self.np_random, seed = seeding.np_random(seed) - return [seed] def reset(self): @@ -106,6 +55,7 @@ class Base5ActionRLEnv(gym.Env): self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] + self._total_unrealized_profit = 1 return self._get_observation() @@ -118,7 +68,7 @@ class Base5ActionRLEnv(gym.Env): self.update_portfolio_log_returns(action) - self._update_profit(action) + self._update_unrealized_total_profit() step_reward = self.calculate_reward(action) self.total_reward += step_reward @@ -148,10 +98,12 @@ class Base5ActionRLEnv(gym.Env): trade_type = "short" self._last_trade_tick = self._current_tick elif action == Actions.Long_exit.value: + self._update_total_profit() self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None elif action == Actions.Short_exit.value: + self._update_total_profit() self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None @@ -163,7 +115,8 @@ class Base5ActionRLEnv(gym.Env): {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8): + if (self._total_profit < self.max_drawdown or + self._total_unrealized_profit < self.max_drawdown): self._done = True self._position_history.append(self._position) @@ -200,24 +153,6 @@ class Base5ActionRLEnv(gym.Env): else: return self._current_tick - self._last_trade_tick - def get_unrealized_profit(self): - - if self._last_trade_tick is None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. 
- def is_tradesignal(self, action: int): # trade signal """ @@ -253,97 +188,3 @@ class Base5ActionRLEnv(gym.Env): return False return True - - def _is_trade(self, action: Actions): - return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or - (action == Actions.Short_enter.value and self._position == Positions.Neutral)) - - def is_hold(self, action): - return ((action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Long_enter.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - (action == Actions.Neutral.value and self._position == Positions.Neutral)) - - def add_entry_fee(self, price): - return price * (1 + self.fee) - - def add_exit_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - @abstractmethod - def calculate_reward(self, action): - """ - Reward is created by BaseReinforcementLearningModel and can - be inherited/edited by the user made ReinforcementLearner file. - """ - - return 0. - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position in (Positions.Long, Positions.Short): - self._total_profit *= (1 + pnl) - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action: int): - """ - Calculate the tick to tick return if in a trade. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_entry_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_exit_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. 
+ 1e-9) / (np.std(returns) + 1e-9) - return reward diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py new file mode 100644 index 000000000..bba3c4a1b --- /dev/null +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -0,0 +1,270 @@ +import logging +from abc import abstractmethod +from enum import Enum +from typing import Optional + +import gym +import numpy as np +import pandas as pd +from gym import spaces +from gym.utils import seeding +from pandas import DataFrame + + +logger = logging.getLogger(__name__) + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +class BaseEnvironment(gym.Env): + """ + Base class for environments. This class is agnostic to action count. + Inherited classes customize this to include varying action counts/types, + See RL/Base5ActionRLEnv.py and RL/Base4ActionRLEnv.py + """ + + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1, config: dict = {}): + + self.rl_config = config['freqai']['rl_config'] + self.id = id + self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + self.max_drawdown = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8) + self.compound_trades = config['stake_amount'] == 'unlimited' + + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, + reward_kwargs: dict, starting_point=True): + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1] + 3) + self.set_action_space() + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick: int = self.window_size + self._end_tick: int = len(self.prices) - 1 + self._done: bool = False + self._current_tick: int = self._start_tick + self._last_trade_tick: Optional[int] = None + self._position = Positions.Neutral + self._position_history: list = [None] + self.total_reward: float = 0 + self._total_profit: float = 1 + self._total_unrealized_profit: float = 1 + self.history: dict = {} + self.trade_history: list = [] + + @abstractmethod + def set_action_space(self): + """ + Unique to the environment action count. Must be inherited. + """ + + def seed(self, seed: int = 1): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self._total_unrealized_profit = 1 + + return self._get_observation() + + @abstractmethod + def step(self, action: int): + """ + Step depeneds on action types, this must be inherited. 
+ """ + return + + def _get_observation(self): + """ + This may or may not be independent of action types, user can inherit + this in their custom "MyRLEnv" + """ + features_window = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state = DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', 'position', 'trade_duration'], + index=features_window.index) + + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state + + def get_trade_duration(self): + if self._last_trade_tick is None: + return 0 + else: + return self._current_tick - self._last_trade_tick + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. + + @abstractmethod + def is_tradesignal(self, action: int): + # trade signal + """ + Determine if the signal is a trade signal. This is + unique to the actions in the environment, and therefore must be + inherited. + """ + return + + def _is_valid(self, action: int): + # trade signal + """ + Determine if the signal is valid.This is + unique to the actions in the environment, and therefore must be + inherited. + """ + return + + def add_entry_fee(self, price): + return price * (1 + self.fee) + + def add_exit_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + @abstractmethod + def calculate_reward(self, action): + """ + Reward is created by BaseReinforcementLearningModel and can + be inherited/edited by the user made ReinforcementLearner file. + """ + + return 0. + + def _update_unrealized_total_profit(self): + """ + Update the unrealized total profit incase of episode end. + """ + if self._position in (Positions.Long, Positions.Short): + pnl = self.get_unrealized_profit() + if self.compound_trades: + # assumes unit stake and compounding + unrl_profit = self._total_profit * (1 + pnl) + else: + # assumes unit stake and no compounding + unrl_profit = self._total_profit + pnl + self._total_unrealized_profit = unrl_profit + + def _update_total_profit(self): + pnl = self.get_unrealized_profit() + if self.compound_trades: + # assumes unite stake and compounding + self._total_profit = self._total_profit * (1 + pnl) + else: + # assumes unit stake and no compounding + self._total_profit += pnl + + def most_recent_return(self, action: int): + """ + Calculate the tick to tick return if in a trade. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_entry_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_exit_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 5a7ae4372..77db9c655 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -1,25 +1,28 @@ import logging -from typing import Any, Dict, Tuple +from abc import abstractmethod +from datetime import datetime, timezone +from pathlib import Path +from typing import Any, Callable, Dict, Tuple +import gym import numpy as np import numpy.typing as npt import pandas as pd +import torch as th +import torch.multiprocessing from pandas import DataFrame -from abc import abstractmethod +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +from stable_baselines3.common.utils import set_random_seed + from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv +from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions from freqtrade.persistence import Trade -import torch.multiprocessing -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor -import torch as th -from typing import Callable -from datetime import datetime, timezone -from stable_baselines3.common.utils import set_random_seed -import gym -from pathlib import Path + + logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -37,8 +40,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): super().__init__(config=kwargs['config']) th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] - 
self.train_env: Base5ActionRLEnv = None - self.eval_env: Base5ActionRLEnv = None + self.train_env: BaseEnvironment = None + self.eval_env: BaseEnvironment = None self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] @@ -194,7 +197,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def _predict(window): market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] - observations['current_profit'] = current_profit + observations['current_profit_pct'] = current_profit observations['position'] = market_side observations['trade_duration'] = trade_duration res, _ = model.predict(observations, deterministic=True) @@ -306,7 +309,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -def make_env(MyRLEnv: Base5ActionRLEnv, env_id: str, rank: int, +def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, config: Dict[str, Any] = {}) -> Callable: diff --git a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py index fcd813ce6..4ad95c214 100644 --- a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py +++ b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py @@ -1,19 +1,20 @@ import logging -import torch as th +from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Type, Union -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + +import gym +import torch as th from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from pathlib import Path -from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, - QNetwork) -from torch import nn -import gym -from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, - FlattenExtractor) -from stable_baselines3.common.type_aliases import GymEnv, Schedule from stable_baselines3.common.policies import BasePolicy +from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor +from stable_baselines3.common.type_aliases import GymEnv, Schedule +from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork +from torch import nn + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + logger = logging.getLogger(__name__) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 21b79e003..b3367f9de 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -7,7 +7,7 @@ import time from abc import ABC, abstractmethod from pathlib import Path from threading import Lock -from typing import Any, Dict, Tuple, Optional +from typing import Any, Dict, Optional, Tuple import numpy as np import pandas as pd diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index a72a56e20..0e156d28e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -1,15 +1,14 @@ import logging +from pathlib import Path from typing import Any, Dict -import torch as th -from 
freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from pathlib import Path -# from pandas import DataFrame -# from stable_baselines3.common.callbacks import EvalCallback -# from stable_baselines3.common.monitor import Monitor import numpy as np +import torch as th + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + logger = logging.getLogger(__name__) @@ -53,7 +52,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return model - class MyRLEnv(BaseReinforcementLearningModel.MyRLEnv): + class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. Here the user sets a custom reward based on profit and trade duration. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index f301da981..9f6a66729 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -1,15 +1,16 @@ import logging +from pathlib import Path from typing import Any, Dict # , Tuple # import numpy.typing as npt import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, make_env) -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from pathlib import Path logger = logging.getLogger(__name__) @@ -26,7 +27,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256]) + net_arch=[256, 256, 128]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, @@ -64,9 +65,9 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) + num_cpu = int(self.freqai_info["rl_config"]["thread_count"]) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, + self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i in range(num_cpu)]) From af8f308584a270c4e35d2ad6d768099459975cb1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 28 Aug 2022 20:52:03 +0200 Subject: [PATCH 066/421] start the reinforcement learning doc --- docs/assets/tensorboard.png | Bin 0 -> 9273 bytes docs/freqai.md | 101 +++++++++++++++++++++++++++++++++++- 2 files changed, 99 insertions(+), 2 deletions(-) create mode 100644 docs/assets/tensorboard.png diff --git a/docs/assets/tensorboard.png b/docs/assets/tensorboard.png new file mode 100644 index 0000000000000000000000000000000000000000..b986900435b28c89e9d9e8d1bdb5413d4411f913 GIT binary patch literal 9273 zcmWk!1yodB5GGbiN?hsg24U&$Zcyp&j-^3V8tD?GQ$QB!knUcPSQ;c;Iz+nu_n-6P zoO|Bey?5r$%=dlsVzf1t@Nmd+P*70tRFvg)fqMXO+F)S-*P^<7T;PW7p={`lf`Z@o 
[base85 binary patch data for docs/assets/tensorboard.png omitted]
zltaIP(&NYFZ{SLxhbiIVXP%LK0taw`nKvNV_foK#4oY{NIJmOa?Fzm`${b}$}t0$+T{e%shWLmO~Cg1v*y+i!NUO zcBjSc5JVp6{x#dDJIMpv+!{OuIyL9#GnoNvIV@+}Z`+PLD10R(B&aY+vdb0`-)eJu z`ku>@e}J!>eLt;MQd*8fm|(7Z|A6NYsxhiaoteC)&)fOTQPmjlp8W&Yz(52r@co_U zBD7#%xNv*8K>m3!TDi(mui+?cW3v>y^YRe8|J1tKRCp}_mT5|Y6R{w*LWUP0=6h7c zx0+(|9da{YT!eX-Ftz>Z@D^A6tnj6kTo5+RBwSLnn z(}=2mk(`Cna0cc8DH;+QMThT81I#iq^0rV~wBQb&ZtTT-))I=7J|qr;oUZ2PK;WPFLiby?SDM)p)zUHV@`Jd=NKr1o4x}hsz?{kiVvFfJW|5So;Tui^MbYbf;~E{AzH_vO?k%NJ z?!3U;05kSoWitAmU%93hwcq)v1kOxkteJ=l}9ZE zSuB2*%?%7_-7DC~Fx(pe{;Tw0r;hv~x7_C4xFvSKXkiiSJ;8&HBHlu_wh9@}lTT1& zxMRNyK}KEcWS!y?NA`Y4#&v7$5~GWksJN__4MdxDG9+AMy*Va+q+bE7Mlx> zxP3Ft)BTC9SC*U=KbB`bONcoQYH1h#lQp|O}c%e0m&`?QkG4DUP<(j#%GfZghXM+^&cgH_HS%F4h zj+kFU^Lpr4-HO?PU+xGPAao)Rb-@vK^&@*T%U-$fv2D7tcPsD&WJMc43 zC#v<@my=>Ar}M_{16LXv8fS6l_w?LxKa8B#AZ&Tm7S^Yi1feYf3l~|7NuN=R9V4_= zAIHSF4dqVV^Lel1v?TZx#X>gJf{tBNrLrB7gkqAFf8WFXO}>Q(BcmsM&yJf-}nibcQ4K@Yc?Cx_d5Wi)gM>tf0 zzCP3WDd^2zllnErv?hGwmPp9?XP~F~;zi)G@fph4R;P|v10%$U_X_AJ?_6nhD)|@4 zx{4nMu_*ap%)PZ%`^AtbUT5{@Bj!6YfTUQvxP0fBvefhWJ6)`_r6GM{yEFu62yC`& z-`cp1m)+J~0s5;ce=*-ltspcL+BFD%B=1gaK@-oUPnY~zyPq(v;UMQym4iFrvmX={ M1x **Datatype:** Positive float < 1. | `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`.
**Datatype:** Boolean. | |  **Model training parameters** -| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model.<br>
**Datatype:** Dictionary. +| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, such as `PPO` from stable_baselines3, this dictionary can contain any parameter from that model.<br>
**Datatype:** Dictionary. | `n_estimators` | The number of boosted trees to fit in regression.<br>
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. +| |  **Reinforcement Learning Parameters** +| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.<br>
**Datatype:** Dictionary. +| `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points.<br>
**Datatype:** Integer. +| `thread_count` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int. +| `max_trade_duration_candles` | Guides the agent training to keep trades below the desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`.<br>
**Datatype:** int. +| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)<br>
**Datatype:** string. +| `policy_type` | One of the available policy types from stable_baselines3.<br>
**Datatype:** string. +| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.<br>
**Datatype:** Boolean. +| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`.<br>
**Datatype:** Dictionary. | |  **Extraneous parameters** | `keras` | If your model makes use of keras (typical of Tensorflow based prediction models), activate this flag so that the model save/loading follows keras standards. Default value `false`<br>
**Datatype:** boolean. | `conv_width` | The width of a convolutional neural network input tensor or the `ReinforcementLearningModel` `window_size`. This replaces the need for `shift` by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. Default value, 2
**Datatype:** integer. @@ -731,6 +741,93 @@ Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](#https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)) with `min_samples` ($N$) taken as double the no. of user-defined features, and `eps` ($\varepsilon$) taken as the longest distance in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set. +## Reinforcement Learning + +Setting up and running a Reinforcement Learning model is as quick and simple as running a Regressor. Users can start training and trading live from example files using: + +```bash +freqtrade trade --freqaimodel ReinforcementLearner --strategy ReinforcementLearningExample5ac --strategy-path freqtrade/freqai/example_strats --config config_examples/config_freqai-rl.example.json +``` + +As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: + +```python + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. 
+ """ + + def calculate_reward(self, action): + + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + return 0. + +``` + +After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple (as shown in `example_strats/ReinforcementLearningExample5ac.py`). The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy. + + +### Using Tensorboard + +Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: + +```bash +pip3 install tensorboard +``` + +Next, the user can activate Tensorboard with the following command: + +```bash +cd freqtrade +tensorboard --logdir user_data/models/unique-id +``` + +where `unique-id` is the `identifier` set in the `freqai` configuration file. + +![tensorboard](assets/tensorboard.png) + ## Additional information ### Common pitfalls @@ -738,7 +835,7 @@ FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's we FreqAI cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically). This is for performance reasons - FreqAI relies on making quick predictions/retrains. To do this effectively, it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends -new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume). +new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. 
However, FreqAI does work with the `ShuffleFilter` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume). ## Credits From 67cddae756833dc4716fe0d08defc5b487384bb8 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 28 Aug 2022 21:00:26 +0200 Subject: [PATCH 067/421] fix tensorboard image --- docs/assets/tensorboard.jpg | Bin 0 -> 370209 bytes docs/assets/tensorboard.png | Bin 9273 -> 0 bytes docs/freqai.md | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 docs/assets/tensorboard.jpg delete mode 100644 docs/assets/tensorboard.png diff --git a/docs/assets/tensorboard.jpg b/docs/assets/tensorboard.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2aefa869dc475aa007fb4f1fd4dd52cd0b9cbaab GIT binary patch literal 370209 zcmeFZ1z42Z+AutbfFdCZC~W~sigberh=_=EOGyk3Lzg1bAt)f-ATdLCs7Of+Jv7oW zgfN5*;r9%RviEkM_nh~f_rI?1{l7KOHP7mGuejH`YdzlvzI_Iry(c9j1;WAtfv|ud z=-V(z0)&0y1m*$UN#Mabg@beQBo00v9_}dud;)?q_-D=#5}i9sNJK((=FD00vn1z9 z$;ikEh%a0uC%t%%l#CQp2^Ka`<|NK(9Gugngl7mz|7G{>1Bm1l9{AKcHr5r;2@))9 z60C0@K{No6C$O;q2Rd>maj8n-uu?6ry?6oDT zYi7@`Xq{S2;M~0VA8s0pjiIRz9aT1X9~vgprzc55a4^alzM#a1Jp6da<^77I}&wzxTg1Q`!s zJnuR)Dn0jPEeo;LK&|~4WQpKa7JA=funZG$-;uGt2$~jleL1ddr{vowWVJByWjxgj zdpuOrAlKHi2wuaUI3&CjZE0p7+e(NFZA24ID6%*g*LKaOsJ_&FJdTc6E-iiTcBN-S7OR=#`Y~vn2wSM3S>_ZV$tYZW& zIoW~8>$H^>o571XPZcT6ucxKd_AfKDSS0k9M6W#wB#lT_?`fqD0Ie_n3Q zS=NB@im7gS4R4--t_X)GtS)ad5!KOoSFU_Ou*O3~Ue?arZ`Hbt`B$rbvF%9BvaABj zpy*j*G)#ogBr2;rla{{w2Xo14mgjP5$Uz&-2wVRK;tHwHw6nLUE=p3Cy%7B}MRi-P zCFOO-_LvBgMr*KCw!^)YM~>H`htKs2K2;b{Ed@v6n6OmgriZaqfV1vB%9)>YO@l2q zzCkRZ0LFWgI-A(olDFi3)3l1b%DsN@f?iH(F?4(DNuEKgW9mvwI_ry32~WxKld)gP zX{uxTKKgxGrP3bc&dxcd|E!OPbXdf~P|{FjX4|N`-c!a(p^vt;<;$|Ce*EYCj07h0 zclJr1MUKEqP3Fwf^eFfBmtS24(%Djp%c4BDGkcKo$7 z#Yeu-QS!N1@oYU6`M_|iZPo7xs6fCV)1!Pj?E|N<+0)|n_82^v!AHl#s0xMxYlS9m`mnAG4Ms9 ztjk&Ig$1(o-SxR;>o)VHPJPYYNFfjm9-qF*8t)&VhgM)|>JFPHhm)qGN@eHRNRmt3Y z61pMZKo-+X=JxIyH?CiyUPx$tU~UKa_iqy?(XLpcl^QgKKLi&v_jJ$3x>R^eE-!UCL&+r5x&H{$Jw$afB%QLDF8` z8@{WHvZ3>x0YM2tytnqIV89C zb})*Kw_COZ*Y-08Dy&QscV0ib)Ndfc1FyX{kAN~CuDR~VYUF3<9?^QVhgjI`)b*qn zdpwuyb!?{|Hb4%w^qXuVQx0Mh(iSR8YJxhq;vZ2|-S*9nzYUWMv#>3`@nY`n^b5=0 zJoEWtu8R*sY2}%D)#HXC>{slBqbq7N`q~4g5*m1OLgYQPm$W7~+HK%lSFA==1_mb< zN!mY5C909TGV+Yexb(Zjp|jOVk`(%x>87p6!5yxV(^_C(}Nd^$Hu zW`bX6JQE>G&WM?A-sN}^l{`OJGX{NCuUypmGS6L1#PI?B@B^+mZdWSLOxC4W3HK~~ zmz{?Wp(e8H@|~mdSm9%Lo)KKgwI@!A)f|^fm+qG4D;at5w9Yrgs=Mi?Z+~O3ZW)q6 zZMV4R?r6Q3+OpA;#I6^0nVW=seaprxnb)f3u_c2$n*<9+>Ti!r1v6Sx*UC%x;kQt| zFp#RR3#M!}rf%G}p=<#RfQ>{{y6N<3 z*QD8X$F(F&PG3>q9^7>0PTw2zks1@SwI5>s`Ju+-#75yQd;_&nM0oqNd8SM>c$ z6VaRYOEc{B`w9RQX2Ih@TI;GyT8cl;Y&xsn*@ z>N%~dmfn$k2v1GE*YC#YKOJ{8QBluNvx%mB045597#{ZKYM*Et;S)xE74j-qz`P^H zOHh)y72=L=Fw_LlAT+f=HhwV+9Y- zC0u8h7mNqPmY4M;qw*JPG+%GtE(&)jH)imT9P!`ICK8@1YT{SvD_5h7uXOL5NblWPJRh&< z7-JW94`~o-xG2&=B3jlT*P5m#&xd!rdVt6?&0+qd1yxofw?;y|m?7FG)a%z zb~VS1zY%3a;NP@6TWxCr%T$PQ2t}U@n=YyxkMLi0wT}pGSgKA!c7s{ofp*Smo!I!# zu0}G^7*Ux6^R{Xh8Jm4l5(N-8JQ=n#@S<|yxw_bIo5E{8|M@}^ki;?l+w}idB))um zU0NN_tPVLkat_UKIr;Bv5fMuS)qpN~c<7-$;&vNs<3kY~a||;>PMP4pFZ3^@kgApp zxFpIr#xdC}EN$E$X-$3O1tLw1t-E_**;ajHlNG*kU*a~VLR^B+zpMBA0AZubB<2%| zl+P3rcZ_*Ty73v?ZqENM`@bA#Ivj|-FHdk^#;DXpAsA*n zn=hB?Iqm*18Uqf#Z2n`;iq+d7(uP)e5+(+OJ$?Gina)G+f9&s(hvoj&ovK@3-m{Kx z+?AMn2O{J(NZ{v&i%}OEOH&f+KB6QR+{ej|8Gk^eY|w>wt#(|>XU>zlw$+1Dh&C8X zEx*Alx^M}Ahe)rBpCiXX$L_9Wv?+UN;cBs>b$csY@U~RT#o(%*Rr-05LCcp+vvMN; zSWJg@aP9R+bzepL(Wjg?d8Tpcf4he<{KD*bw`Z&!`x$px$9g0~bSZd)IyHDLeaPriIoX4Dso7_269@aum>gE+PSB_dfTe4qlCih=Fk2`?1ussp3^f!CmQDy(? 
z{-%1fi|bx=rJ-DLLWG7&8=<$T|HHMy#p~|?K=cEBAZCnFC0KP>KK>7wQoMoM*Adyr>?GjaWkT9`Mi)ePR2kM>oU>1H{t$vV%1XgJTG{Gwz_yiB>N4x#Wte# zRP~yd^SHx}fT4g52zgV$IfDBYs+9iXv6aRDdd^BBu2U75g9hCjv-u_OXSuL}oGIYQ zYK_nzwY}~f^9+`y4db?MF2D!n86p>;aI-C{Xtq%a>sVBb%R~yTXVrVRf`O4Hi7y@F zj5Apcjb2N~UgYzI*E~dY#xo)5303Lx&<2S0?~t%_T1S z??UN{!j7Q%C-N~F^p-`bSuv247$9yp_(N>`TUHufm?sM_Jq6bFz)#+Q`%9UeEY~l5 zFtdU4&u7-EzX0|)RkDUkA`LUfKxzd&QeQw=r?$#DimAri>88C0Wj!l=UhZ~l zi|09J*OiKu10~}8Oe>fIgYDFGsu2-rclbk-)Mut+q=Hlgd1W+QRR&k22S6{??R@57 z(l?O~9w=!w_W-IfuGU=Zgsv_To-8FX*H8Yg&qJ&u;@WP!uJD`6imyxq1z#3?z2DTU zGK|%HqAeS>MRJ#!g;*^Y!ikiQq9>rRC=WoY!F7k?JC~OoUnkF1?)BPS@?Naq2Ik<~ zqKZ8EQXLJAc8xD&w6dNdX*%0LU)48GW53xVuElme-Ql9$9QdfNNP%K{t!X%quG@{r z;*I8`_{#&J*@D4Xj)tTz;dj$q^ugl?K@I6c`g4~?T61I!9iU2PpXM(`DLQK7jA-N5 zv43H-mINnia8s5IswP?k;T(Rnsr=#Y>r8(hxwY@`L=y*T9}|e5wL`3)nW>J_wn?VI zkmj1dpo~EF?2Bc;9(a6J|Nb!)lb;sf6}Nk$G=g5EOJ!Hu%`Rsk7;1 zPVr_RUkJH^B2T~7?bF}k$c2LCzB$UevatUC_C{Gxg}QdfDj7Yu12+=|(uK)jHH$xl zYfY2tf~|ObIt#3kCU|`2>FO9XM821mxN(o5a~SPy=dNc}dQjEc^Vqh9 zMU0l^_;RNQ@RNs7dVQ=`QjT9ng%^vhYWR(!vKsUOEhj2o`h%FTmbMuOa<1Xh;RUql_J_qW4{H6pq_ZU%zT`X-U9Vmdk_uWyQ0-lxntH^~9Yf?*ldeH-eH!IBeYk z<724*Z#4dYYNz~o+<4MHXV8X1b)QpnYO;&zopb-hKfEsbEqFFrRP|-|M*1P>`iox^RSn^ zjFgo>ff#EzJhnhlq$Io`c{T5r*du5jxUXZEKtr+9>HRt#(Q5C>w}d2%c`|m z2yq#?0v*UTber`-ZrCe2NFM9BSvcy^L~JNgGCT8&X}_Btcm0vUIz)brYP49(kV98P ziXhQcv2{ozKk2s#prU#f+XpONUlCQ9NgWNNkPqsE{l3=4(*e)v+S;biVir!NdlQxv zaRe**9R+1shQMucF9;ro9>Bp1rcNH#ba&*;R;X%NbJ%VONYFhL+?1w;Wgd$nnRBLJ z*UEn;*K|@^u%OvXM_Kx*x31Hdf3{@l1Y!g(^a1rls*_oX4|@-1O+5KJ!VE@ZR~`{s zV#;g?JP4;-_8>-C?k9NJ)dnYYYLx?LC3K=wp{zzA0{G}X&7ZMPfXeoIIlXeVDA%Z- zD1NV4hrBByJy$mSoeLS-xSIfhJMhaI)zm3I{VJ`IL+gW9YtLE(-A2;^x-B1yrwa<* zcaXyF>Go-r8KACQTv_}u0i2rLgJ+RS4!(fJAP0$c>_}t~H z`^0DZOE`ywZXJt=PDXiks&^=Fka8~Kz&C$oKWb(STQzbgt}TT(-1gWU1m#P}45~H{ zj$s_asDA!M6!r&&dnb<>ei6m*HP;XpTJoOo!t(kfr-&nNC&vB31A^u0yM+=;ic5;85kn0KlLk9u20-ERZvRWxgEWa=|%Dt@fTQnsQHg{ z9uv&Wy}(2XsE_901g?jr-Nn2jktdqNPt?OlH%{q=AVF84!^!3sk&&NY;`SAJ2kVy` zI=*&sepsERu~1ee9%8G>)L%pqL02-Es!nZhMInquZUs|gY&7)y-cJRrZf;0By6`mQ zY9)$1-w&u;TY3r~t&LQ@axvLN$F0$~;8*eVZgKMksMyx?_VjH@zWZCluI3v?7X|@x z=@ONjx@W0Bjr?PS?*AxC2YsO^K?b|=*FHgrRba#nN3@h=Z`GVyrK3mDzTT8(Ya0~3 z_Ku-r_I`Qu6kz9!CDpv#k>X+wWpV2o#}l`Wm7=HKJq+lboxA#D&GtdMtk5u_j>j)C z&r8H5u$zY^X*)-(jjeww4*IyEQ^uow4)A}`Y9x;o?_4H^ z+17}NCOO9^^go5-``flfl=PgA$m_d+j)S*!IEGW*YV_CMGKOgo$g_%XuNK<5v?&bW2t5EpZBvfNGq{YtOPrm7XaA)JFF_bG81lC|xa4ksJIOZlnk%60qMM3eAu~#L-VO&;x zh2mCRW9vJ0+PzMqD!%c{*#g2u9t(L1V|DwQ1o;R0LbdDMe0bCg!;)L|XXXPqRx64` z*Xik%TbJBbVrXc5QmMX(Rs?~@n0S1xoe=)KpRbFM#S*{KMDRY&f@5Yi`<1jknzYjL z>nCOIz8#{&nlI--)Edv}^+l@Xm&Sq=Ae736HJ`P7&C$00MeoeBsqNwze9gr8UwH8g zm}B#Q*JGgHCj+P*!=^0;z*IvwE@E=D-yhZ0Py6vB(BjpW(nLmUL$Vd}bzoyTK%F6j z85XxdMUfb6E>&_c!*lPw;uA3CH*k-u-Q?K0K}wBSl^Z>MEG_GMba^Pl&d;#_$EoA_ zH_c0N%UjRY4#gatQ1xRv57g{*`%V;7d-q?cGz7EAQ35p#Zl{W0sQaT5Sv5PTL*dt{ zCVc4(==6ud2CDa!{mj#oOU(E-OR})6Y!UI^I|ylp8f<_isDy7RfIB>0E`KNUL3nR+ zxx6=8H46R_C!u5xpyXU>462yh!XMTBB2tm985fbd$C4^!Rzjwqxd(PmgAI8e@Z}es zvt?G3dGov@UDS*oza;RItaFE-d=7SI)nBVwv2V_}KV*^p#hP@hsC3e-+fY7S>RyZ6 z;#dd5JHnl07w$s~ds_Gm&AhRE(}lI4Y9cSMc-0WBk}>2fSt7vq878L#eB+p~E{#(f z2u8iC*_97Z)3~%UKF$=!?h4oJ#SF>&H<^6=MYPnl2O&#dho29zYUYlCMI;n~4oFah z=5e~E1s%$)U1~_bbn@OhEo5H9`b!W)u z)&N;2?)^T5@w6>s^-x$mudrm;C+nM+W>sWn+PhjJf@BOinau@z_&L`vqQ(ci0=yG? 
zH&S*o>KYEFQITnl#i+uTH3`#;b;y(Vog1swpz~}^m<8uPGA7G1>9Dl_XYRw(O-NLF zO5TyzM0T=>#~Pz2gbSD&>K{rh4J#@k7>OOun)I6i4<5RidWXD8t(p2>iQBntx>;VT z?FU$?`MPnyMj?}vEi=|(NS5@dNc=;H$T{H1%H1Je0P&MGoughonj4@yRgnN!Zy^WW%@<@pKY&+Qoe2zQc+>ZIW60jP zar`Vf)0uz(&!)|0DnPi9g3lc7!^?U+Y9$It*zy1iL6wpAK*S~1b6VvYf234IB?E@- zBk^;xlc^3i!y@MZm6_W&oPeD0x92rE+zK=rrRhn1>qe8l^-Zq+^`TBA<)PLu{8bv@%n6|P#%s!D@DIbDNi z&Vm{YV}GmJzwfG?#HY`+rUs{(O zx2Nld!Z>-9SUEdUojX`D9_#=nRjZYh!n`Ty>Ki5N4p=TrzO3`@S$qv>%UHL1#>X5f zzU6a8JLid@V*=idL$yv)sL5PP=M!Y~?X+9Qw0!f*Xrw`;D8LIg!!++U7>;;2*(6k~G+?TxAkxMHmxxH9O8rrZK-u~r5CMc? zai;_W&RK{U?cAg-)(l{j*4x#DEv`QFI9TXjzW$ivj-UA&X;y2fS$8ULVF~24=zSGZ@7DS}t*(XdkTJRW3$5V>?w|zmS@n zF^7KrptN*zQvw`qH& z-O#w?rvMxlA5f%|ik8jptNh}*4;#m#S5jyz_k$i%h0OFl+NdrX6K5=BgUo_}B#X&Oq5c9P*}e0<7!_ux(Li4HJ+N}@>LJDeiACC^!$pqr6u zSCCL1qSIhi(mUQqYxJ#IDT@^fC$qDyebs+zvKYA1cl)%1*a>m)m~-gL=JI2VxhJCP zeiiW6B?k_#$rBS1vV}*1(%ahPX6$N}ncHZ-N9lfh2GZQtH`}J%PE^Oi)%g2S8l$86 zk+MMc_CFiX73e)@9`W~aGQ3rkxcz+ZHR2g5pG(^r<@hq-|QIzH$i2|5-UitmJnzPitTJHu%;Wjw^!-L5aFZV zBJiKU{{Z3tubAL|dlV<}@RH+iZs$+-KDj=5_IoEb5qa3SYhbdlrPXf8(DYck1VU9KRJPbGekZsRT^F_8!%6Glz zDLKdAf8r<(O}vaZ=DacX#m#Yc$dg(Yi&a<^qx!&L9;uN(*uNIO7Mj#mQT%F#(?Dv7 zr?N{6tgnPhsDJm|g?6%yL#Tz>ukhp4VQkAR0^-BeMC?8vK5q6w{sH#b>jToOrifYaEzLUfU(C6iLc6E8DZ7>b@$*Ex?B)w5TY5il)Y>%06&!l8{Zc1V+I%A4g zMsvSUq)rXt{53YRn~Et#_Ivs`rP&w1=~Djfx{uDr2vdv`)Gu$b{G>WhK zOK#kbN6}GSt$AUhM?MJ9yq5oLnn{koEvC@&Y=g6ja1>jZdf%yvgKGhL+xZ>SWU`1` z+5V3^)m?J+v-D;~jEd$^6OF=+@o5^UY+)XR(OZV?LgQe%vIl_u0tmUc+h2^XL^RUvP<5eq%sScE(y6Nln&y}6_K{@o^RW6SrJNkShSbt*TkwX}MRJY&`LNPo{t{7<(_Hqv7hIMkp{ zbRM0Fg$SHfqASEYk6BwcHc@>F^)!JX1Y?N~J8!BKYl9-1Q&@V8TQ$=;R%S?zzH3s> zv&DBGR*iKZs;hk}ilJXjsF#(f>|#mkVG*rs^Gnp;Q)&t}N_0d9bm$(Rhs;&!CYIX0 zwBQzpH`B@+81}p6+Bzom<%H84513N&eevt{3bmCs0q0evVQQ}qlqad=$_KmH#P;n* zK`O+9i(c7>0-x_rw5!dHyK5sUdf7iln3z5rNP^~5wYyzPnrwBA|0G-dyC#BiD`I@N~e+huyw5>R&*%Ut+9U18@sg*lr7Q{gWl z{t|DT+l-#HPn!w8X9AtS!Es}}@rC#(zDb7O=B$hFGQP@9X2O$TJB|!P9FVJHKqrxc zp!9h-<7#aYnoK7F`+gr7!a9v|JTAz#RGD44q<^BK<+c``>KJ|6LCH4>21{SYy#qHb zfY14*pm>rL|Df$N!Iowmy3fG}CSuO$grKAcpPkToIhMxOJWZ(Gk?)e%=og{TLnd3}e$EC!|5}}l$dY_w&4Q&Kuw9;_H1~@@q9WqEL6JgcH0>4_ZyG1# zq*;2aqJk&=O-f$$Jt<0Jk<&43y240tP@$G7K5UmzvQdeaGQTEXhkZsPGW8qupsG z#km-#P;*6GjhX@HwI||pG z@AXC}1YigR2nPn30azF}TyvkayVh#@3dU7NcW&4*0dyj-;AwRxaB-K^f7m(d#n0}K zMY|N{M`}F9rx8rOQ!)$F+Ysx-KO3ucw=hp%lF z-|@{RWLr&1PB?;4xt4gIH^eKJ*A0KxlHZ#<;@<9~nK0Jb%_vMC=k~3G_@2X1rMn8E z$goR~#dmj##v-Ti0|_jKd(&Oz8J}Y$nHp!4_1bGqlhxNRoq&&rG<&};cqtYoB;00N zyL|6~>DUGALET>iy?Rp_X=2`3#vo?DR&ILNHm;Rmwt$3v7XAC&cDLH?5w&zg`(*c0Kwi z_Iicm;#t_%h1IO@yCpC4$4{ery>3o+Ox>NB*o;Y3(nX~wr4Yx5P_1QU$^5hQs>ITS z?<@TcHXb$ghZD2Gl1}&0_%#70T8L^!m|IS~wv4o~zg018hJVq~p@cwtIGMW)=m>oW zTedKvwNK>W!oro;{XM@b_BHCdE^KPgcOFrCNP{iKZhD>gp0p&%kLAgm(Q{-K4f=WS zG(1m|Wwsc@m0#((gL!&IdC1BIeleLClK4danZqqmuNa1vnJAHB=Q+1 zHf%8JsQmZA`o7?+MB)fUgq)R}v9(`Bz4hC}QvNEwG7&4sdd4`!79{P<_f~*|*Ns|C z(p8Ib+zohP2&c3s$l8C!N<%1dw|O&;VnrV7d{E0MXS46o6SB(N?%9aT=fYu zaY&!(t*tOp;ao*O+^U3)!`k2kdX*D-mR9l7Y-#h;;K+l_4F~4Hh|Gdy&9R)rmv+by zCnh|zms#xuIvTRRZfIRBUb7~Gk0cmx3C~i)h#2ydd2FynsOXg(TZVNS0dDs&TB zi0H>1oSHz+5`ILS5|I1zPn!0f2YCZndGW@br{Suogu)`%xJh6L7u9L!$oYJS6@*@o z(J%`&RVDUbeP~Mnr>_JA2EJ~Bx~br=FoC!y>7)7iS-xo8#pJzLMA^<@%#1<+O%F{)M6G_cLV$5o%GW&fJR-PZ!N*~$hZ?Y?<^EolT zJlv0M$pEk_7~ve*oGQ&*3DM%ibu$kA0M8!90_2?iX-1f&lO_eM>-aA~HP21I201 zWzsMBO=^7%yCO}WISDA{dDRd(|@%9(Q?FzV*mFnkaYHRXt9+u>^-nar zdMXx^f2eF=GOD|ElG?Um2a({L61msS2DX2MzFQRhf^VhIf?hu=_cZH95$NDVxQOmy zyyR(kaltc}O_ov80?K|Q!hbHI#_CZ$o_3;9K8s>gqV0_3OC+Kgo#_^1yJq&Cv08LA zcR-3$1oFXCo+ll6MI7pC=g#U{g2dK zlxY?e6d?3COJAodADdq?rVY}4hh5o(F%~9@!Vh_Qy`9`k(N69xeRnzcsWRzHk+Otq 
z<(56-VBaeYz#;+TFLV`rgSTrOqxYYnYwBoTIXPsh`;}gcW*G^8tCpG3?Q#ErcRx!D zVK&?I9B@C#$C{!~>Ry&9xXWyAyUMv4VPQW!HXzEJ;rhfg2?I%=zO90^EXK+#$%rTl$Ur_OM2d?A1BCkR^^Wj=P|YO@>sV`=Qo}Yv^yU z3n^p~7TTj)r8cYs*K_63fsTH1%0PleZ#+t0oyjvu1~an`A=}a_uYlEGSJz*Cch^r@ zs%2u^xn{#)Wjk9>C~kzz4dpaCO0U^J5#N-HaS-E#O?eN{LlfaS7&+9}5I_?;gjEkP zvsJ!wOUHrNKP<&@^7X&tmz4~=}XK>4z$#5`SgTvYHAgE0T*<0r* zTSF3&43-tiyC3)9)R+`V(%4km_>h|2NkQ%5l8&DDjC&UFkf?Y+)X%mbbZ9$=*qH(q zx9?~=k&vJ1JPFTijk0MJJq#J-zSTRcW+WFHU@Xb%Y*MF^VwB3QoX-f@d5^Z`^k<>rh?Iv-DUi@~c7 zFFXEx-iE=+nwRE_7>Y=Ylel5xfZ6*2uj3fU@6UMLUwhS-KQeUO&JGwTg-1OAGvi7& zH_gf;2=hxwV)R?O^oY-XTh4LL5I?=D3fDqVM9c<__RIw{e z8J5U1KcP;^^NWc6tiQ5qiuBHYU~{nXQ{DVB49ESi+5HEG3i4&nK_VE^6 z7MQt1{wo3kk%p+Ur=H)Ora+~#*-5MJ7GiP@j-6CR$;GmjElw1gMQgo)Ci6hY5bs#K zaK2Y<7X1MP*}Hw1k{hG8?70SgdT@9w=PG|Bq1B!1{6y8W(iHs@CyEY$2~nBkr=8`I zbswXy9nhagAU0z&#XyG68%y-=Oyi{m{lGr#2C%^W5*xa7Gt<%U5jm)_)d8hHH8E9( zX%&v#N6{aYHu4In>h|qvARS%^GE$VlLee;QiZ1^Q&XB&oTiZ}JyS{@_u zLI-vsnSTgZzk^CUUf4!YY)bQHvdS#C)7Zd1`5hE*ZZdiF?K%6Nw8Iv7SJ4}m z6vN1)=^KnH;S@g_Ma3Z*Lozs%l5>{mrT1(GEijG(^E8N!RC(1C|8^OH*sq6XW!r@A zTrPgPY7{o#zSNQAF7i@0l{+ClIb3DFUw|kL`%bJp(xq`Rrq6J2d+r``peJ9$XhtN` zWYTCvz*iYo%`LheEBQe49uwti?Oe8A%I6zSyet-X5H2E%x^)PvgtwnWbsZMnW36&F z>kN(ru}#hn2sg8Z1;fa!(%j@a+I$I4Z&zu0t1Q{;1dXx*BW&;VCaZ~-!(*&p71ryl zDw-gWDX@2O+%LR3{c}=d7(8fgzNkK$<3uc)61N(q#*u|P+R*MFrJ@s(&%}k6Nj*4Q zH@)L^RR7*l?mLQNu0kJL)?2TAP}#ydak>APG_Do+qS!3AIYlZwun{ zR)|^9idCB55a6igBwL`P8kU3KXYTLIK7#)u+D7x7PaQ0k5yZE=`){uHZaXOqD+pwn zFM2;ulS}Kzh3~De_{t)oGDry|dbBRF;>`AiJc{iT`TOn_soLAmk zTu--1DW$UVi9oxc{I=#F0;+tJV4=?)cL^fxkBw)z+f2swuI+M2pcV5U;9uAYA$O0lyTDD$56zY7xOw-Rdr=V z^}X`TS4F>0o_MVI{qXH1=iq=JX2`!d|K667ck_MRFfLu&>s}9^4GAE6KYavvJEk41 zM*DRu#GuF#;6Va;L`Bd2L66xky6Nq_CRm5~o{03>?s^M2GsT-Y($H7@G9yVag8tZHA&%+#aL<8#Y@ z#$JuT;iL`7e710Dd0v*yU`6Ol-C`q3i{+8<>s024A4~XNh=L}tE*7-G%w8lZ+7WTZ z@n#!DSjCt^b+gS^)#|Ezlt0bWw zPG1xG{h?=Gn#sBu)C^yN7`nszGml;;T_P=wN4j)ayyqK3^L4VDhPQS8Y(QTaPSgRl z{ySfj`@mCDpzKI9Py_f1Qu$Fs{8D-0z_mDp1T=&y=O;#9|19mx+Dsl^k1Y$KHY59k z*|~j=k5&6@S>o+{-q2kYnpFo*1pYOTR{7>Bt6Eaqg+2Hh5>WHOW;g$Dv;9fNNRsO? 
zfh25;U_pcQG0P zkX^1>riXlm+>43KlKKw6G3OsaVthb5PEn~XulYH)c(!<{A2X3hQDo_`kpXi%n1$=~ z>8k*hVeNnvk~!y9h*B~ip30B6O_>|l+6pr?73CRM(Hziqc9D@*`&2BF!U;_3w;oFm zCYS@xN5QDH^8`hfZ5X91J|QYSt}#VvZ0hujCDVT3Sr>Hll)`}y4*B_+uH4Nq`j9~# zpWvL`p^~&;E(96|MnXe8vlSX%a1gA$*Ii3X`~-(KH!N(txhK47%qYX#!6G)MmPtm_ z8=_xPn3sR4p^A~R(VxBaMt@ao>I;$UQ=58KCAoP+(|~I}6;ADLjxG&-KAtIs%t{rl z&glyg@s@UuhxKgA&fV4*G3h+3=E2S}IX;G9nsQ&}u+~z+Ao>uI!PVzep1~jjlf`jX zQi+4frL2Cd8DKnA5sf(narad+Tg=j+Be+Nto1Ne3tUVGj0~8Z>;nVN}gf zhPs2&7?w9Ax0u{T$BmNw7tsL7!xNB4zc0_QG7R`=i-ILr8`lRqR@Rn|jfqT*g!2rg zw*!@NubyvGl<_!Safm?^v3l^te=;F3)>@Lj8P_2{<$5J{E#kiaAT^)m*L<-3{r2}H z2BJ_+RRevnih-`@;AT`h;uK(9XIv9Q;BKwy;x_2vsMEofQ?JP*n;{#it=3S@gcFgZ z{({oZFY69+_w^m;DoAB)45y?S-J)w%t6>n;$Vf!mg7z6n?JZ`PqVm$F;9dx+lfbU5 zY7kFtxMgb6M#(Dr&fZBhLx>Thc;Xf3eovv4nY#l#AT zh)b6~Wa1bnXh5m|H0FA8vu09^wESkXagljsVmMri+s8{jsN$xWUXn<%u$n0|$@5W~ z$2*5bQkaPZY!{znqGS_R?vjxVm!Tn&m(8KwkT5RX(On4ZBj>E`+`E7?9@RJVYX9Dv z0IkM?2dE>o#Pu?}1-^9|a7s_(rL=gSmWOO|?Y|vZX|W_~K&jb)`|^ulNMx?NK>*lx z>29SvZDMSi#g5n8*4fjOMvVbe?3_m5Z4%0-{J>}_D6}A9xi?)PLk(ImAS6CjS6LLA zsq7uP+3&7mjQR^NB24D=i0(DPdyI}gsE%bkAe+isHIs~TH8t8Q0o z-$f9pizeeY{cC1!n-6`#!mAYx#nb3GRTUaNFbip(>AS{CIUE!r7l?G=fnryPcuf6$2IwMU~>@r90J7t<1J-Dzr zre&wm8l-bJ9K#YPe*+I?AT8QFsDN0ps+gbpH`lw z^BvJr8>GjelX*#(oird!03 ztQi>`wf0$5(#x9!D!^Y0XfuBdYFu}|MPK*+z}2|2Zzyd$Ed2>uGX7$~h>?w;+vL*c z-4@pOBVP}}lbFqYMJcx$Q8T?uIqN{_$>3XFXY>@Mji9a_E%}_}`BjQsg|R+NNqAtq z;HxC_)QYQMZp)_N!4rlJ@&#&&ku#GjzWyMb{>Tk?=eVxiN>=}SKatMGNXetFT_2)N zY-Ykh7i8-7g~Y;hYZkti)Wk0q z{o|u$7E=ayj3M%E3Ocf)lGDG4%CDb=)JR3L6V5C@LEjZ0|aoSue`&-4`ih8u^WTL{zs zJg`NKd(purK)nT6V}%g!T_}n*o}Qk@PmI}%ICHc;t?PhZ8|?9QqiW8%!|7ms31dya zVW{mc@YZ^33L^K6r%5^{t%_3iXDZ^6!i}VzNJ?W?XAk*AX&>f34y#s;p%`vu)+%v4 zgM2^xFCzU~lRiU18?u7MJ7s8Lf_c`?Nqv{@EIUkf27p37arVEz|Xi-N+o9fbx>iVD%K$`fmPwfXD+SDB> zQIhx3`u6De3Y;6Si@jrgX#H%JxVRV&;(A5-N2G_WhF{Yq`7Gje2Z$SHp58pJTPzim zo;Lul6fSRpNvI&o*X#jb@b7@8jt-7q;E0k2ySRo&(_^_(Lk{L&?C-c6s(LA8+-Xv$ znqoKqGNVS=2+-WGpQ_7E>#^s+8qeIGVV+omHq*l5TD9pp?l>q%S(IZ95^XVvfeInOwmc~?&6!6 zBsr1p-CzM^-P>V98K}-`I3a-w*$@p@71@bH;N(^MRDa%*(&Du|-zE+bC|{w4<#9SA zvsj}VofFH1)DYDZWX!EWjTR3F=87)W*?zwDw7WIv2Zlo*NWx{zpP(worpwsHL^mvn zRi@Hm*m?eJ2_@ORp4-%EP05o^k--kEC}gxzokx+s?K~Cy2$i`{5oH*5)}y#T)jkMu zm2(}ruU8zCHDGaf!bnWV&Nkk{_ZLy%-`aA|(vM1@-;C;$=NCwyUnx9p@_t!TuW6$z%QatrQZp{VMnEw zYY!wm55dPCKNVj?^84z-D9W~jb1z7Z@F3?pKtzaBXv1XSSU}&RON2o{Sc}C$sP+Pi zfl1V}xdQ5ZSdxw_^fFItCH9LPcdq-V?@z$^j9*5!=ETqzSfWCEp-wc|r`-8D4!Olx zVDa~q!l{t{gTEsDGj2swN8ynBKa))it$Uln#N7#{{H;K_R7l_7lNCvKyB-uab-U4T z{Vk%76VC1Lu?Xc5GB5gj`4WjA+-lALSu~-V6GIzI?@r{sNgn z{g#w`1&_s2{q0@Fc1IkkOLK*>pvQNc?)!^FY$keoORQOzk`TSc?K)H!@qif1;HFD z@Tak{mDMvF52yd#dlN&Oc*gdPe{OD83etH@2a*OOr-Ud6t0k)W zXXg%o5%JX~_z#q3^SP7{2{lLr*i;w>ZR6|>?lZ#6+fvw*@wZ1u&9$>`T9+plM* z2Z))sv>hYH`WRbT%0G7IpF1S4RzyUO(Y^23+R`m}JcQWab< ztm~lKVF_zrA?skr5#9!=km0#Wue?bzo&DIfX4%yNSnU`Tfjk`8X$~In(9Ww$9~91Q z!~@20MPiJenlT29L&`)9ZzjjF#mH|Cl@>0?u}>2q#~Jj!N7Z4krW-$63;ll59lvGi zpDy{lYO(wKV-AXVP!`MMTYT{Pt~;ffc_)t6Ts5yQq8w;P@T#eQ3@raj4Z5hjreIHb z!_Yet#m;L-dBZIHqeQt<@S9{Ywv<<11RwpK8q!zp0>1<3UqIiqcBi-Qof2PseZBjY z`G@4-?zh}+ep7)^-bdr7KbT&_jS|j_ZsmA^jaXNOw(Ne=J+V$++v_-fE?E}*!{@Oc zOJakC`IeCyR&t@H+nePYg?SpN?uGsa|C!5RL+)!=L$J|IHWfxP)Nx^0pemcncG{^4`N1IK3xlmeJH+e;U+f#<7vFi^)$&Iud(|NFC&Cw zPEA2SD;@woeODU*))0W0_v1{^dE=ud=(?2h3MY5ojngj3*n83@NyY^hw{!LU!md~4 zDi2ad)B4xVVm_L9PDTsiM_*L@{=Qd37h6!SVlQx0O{Y};n91|_!}|5JHqqu5uTP(q zX1ZUWQta@LD0+EOJ*W!rn0`hP&aTpSFgB6vGsiT#!Zc=FYxgMiWy|8y7zRJN zUU>=f>P?Qw+OLx3U*Vh;2Yei7_v`dFmj|Y!Y?r&H1AAHm?7d6{g>BC-9UAamGQ}$#i-#GYI_>~~N@{6eXvrKdM1+`FsN_0`| zt>{uhZf@u9Sa2ZU@|i?fDd(oa$|g`K$}PaAsK&dLlc&bWK>e5ujlZHYI=uG{*Z6P( 
z3vazkruhVEU9`E7=bJ?7MQ-JpXyVSJxOQE&F00=#js57-kFsjT-ZOGyq;L3*ep(A5 zUw8qhb2OQ_ADdwLwySWZoX<_CdH z@-PxU@x3+K#68<_=l=cXBE>|_$TpY3Jqxq>zIm2^e7 z`%DRr_Zw++W2Nhh^L=YcY6qI3--y|Ql%4g`p0=C;goJQtGk78xp<2- zowC8I+FCku+}t?m458~gA~x2OmM;3pwz@%Mpbl&i@tSk=DAEiqG*JM67b0878&?zdGoX&H6 zOa_wC|DCO^8Er>iN66CtS})!O`uLsT+95DKpyF??D5!pE%R>C?e|4Y!%P$j4*b8jh z8a}Fme$JC{ePcuLzuYrR&o`Prl@h=IkbMrwUBkC};9C`Xz4ZS4ef(~I%XxmYKJW;g z3SukM6!Y$8)!#8~D~%dzU@-rkX1(om2_c2`P=gxE;&JnYNzJ<+O*U9FZs^}`oT_`7 zm2a$%lC6jGia>u3J+@_ol{MmO&NYq#i-LbP$Pb=U>v{M$ckl%N&k9M-c^;}IZC}D| zvbsDp_}GhN>hN{b`wm-3U&EJG5X%3TV|MWTMO5?;PX6_|?w_qu>JKm3vR<+jDGWA8CKen^9UFEg)Z~kNB*qG<$RW6H^#fu!!bfH+5pIlsJvRq0QZ^ z=;U33&A_d>mcfVZJIxw49|LQS-;D>fu|Eu(|CTxbIgI>9N=Suh6Y_#LK;QfECsMX6 zdeHk@dVB|?>4&=XuBWkwajJ-op@!v*oUe}zWpWUY%wnnYN@gyEyQ3GEKlvJBt>&&sIO9F6>q?q)qh@gS1maB`_fBawd?)momhTB8R_w< zIRi3GU^_ER0AQeGY=IP)*L1BCYs1XOKtvNVm$c+z`)l~%6~!sGuJ7Dtzlct4PQYL_ zEn_`S#i<}A;-~!32OJf}2!QI94^hk6%)DiMm9em5_`3YTsk54a1t72s5U}f?D<|+( z0*v&f&(8oMj6`BvG?G29NYY=qG!s# zh@6_Dpfppm{4yD-y^Ej!-H+rI)Z{$W6puFqCNkBM zey%DV*m$PP&`D;_^l!oqyk_*r-xLqfOt9`!Fv>h&WBzPrAbDQX?GnhnC(rnnow+Xj zb&Z7>VyNrkV{rBn;SzIV zLMLC#`bBh(d%pFlM(sF1Zp*}v?N%QX+lO+(=@Z{7oXN-kg0fE}$;XG-*?Xjm925oG ztCT%S#`7aCHllpl);=j)&<>92SUvnA;^61361^{#C>Z818 zSqJ`nMpNH!h&Cnm8=!eBXQxJjt!FH$5?7U-R!#Z8n0w2pIG&(y6ha6VAOsB>0>LG? zLju7)xCeK43xQxkg9mqKan}S5?u)y-yX4M7?EgIPhx_5Z=iGBky zRUKU#;eal#O8`c5=LW^5%)IoSq5AYf*lYSm*vamDU8b3 z=Q+)zH41Ig4J0cmk%!lBIO|o>(`3p@qZkATR}^2qt`f60ssI}y^f>j3&6rcOpwkO6 zh<@XJ(i@WiAz4+AUDfcv z<5)p1K3kUOTHx~CB7?xl{*sNADg%3mfThW(y4g-c6CB}$Pfi66!( zR4dRmAS?`eJHFH}FR@k90Z#cuA=iPEFcTt@Xw)zwmy`2cZ>W7>LyNcsR3#%fEZ_Du z&ky}gIU1N<>&(s2(P{2`GamTkyrJW#0ViAYpQ(TgmPl z7>tXap6Sis!Fd1!l2rcjF*inFxqM^3NlB$HN%ruSX*ILW37CJS>xoOap~NKzLU zXD|l^YJYnq*~l)X9aT%~pYDPzLWhFTkEqpJV?1Ry1aEg}$)~}zo2tAl#XiRs73VAz zruu=QlNBb>DX^?A3_WC69I#b8MlnhfKgPy^^XK3?ooIk4E;OCD&8%2H88 zuEA`88pX_a8V(=~@?WauEeDdFT$pL?s<_O2km>&kH8Nthk%4s{3Vno1+1Fb;J)2n^ zVKXjTV@5h%`P4!UfsNcbW?>WfruB}GZkFuUCR>3JG{L?5MMPhG|D-cX|J((j=q&7M zvVnVb)3v1So;M7$=Rna1guIaOdbK2ttyG`Y-sR|9t_&Lp@NsbAKTRr5FkYV}{7_dO z1>Msm%7dhH0!v{Ur_Z{vDZg((WMq*@K&?%-{1b}t1U2>p9YnZ~U#GiE?ee4a{M+vx3TvZ3lxI7@ckeg(JJT5X@Cr-CBR^x=`+6G*FIXJ5qZN*+Bm`A@?v zyLI3Gp6~P7{u%0nB_(3=TF2}|NE4G`r97J1ABPhVXc&!1r$GZAwYP)c`GsZ(iLV3}q!aF0=R(3T=@^cy>v0 z_S`L)<$W0oK5Ed(q{wYRl8Yfrl>Bae_hhItRh;T-%R}bfKKMoZt-kf4Oi&71XR^`L z7RtH|6Dz8;%Hpyv1L!J}=Jt>f?3=k~GqY^_R!FqJD>{HXk;VYK^6G0Y&h+VXe|HY&0hLEt7 z6ZE!~2G9<*FtXvSwaOg6P*(2DDUz7?2@-qwDJGn~CMOhPw@9{*DGpt^9ni86`+;hl zy!*np_AKJ>e7S6&$#we0wn|E(!wZH9@ITC_dtuG!%DJ+?*rrU-TI5!~nl`SRLrvk^ zJ@4B+X!_(?AfaOgG@f^ioyolOF&VI)rXRXbGz!InLc)WX3{E7zOo>ubztNk)r;~B4 zCO>|nz?U|-xA18X?pfk|oDBKrEqpb(<8_smf&oOv{vxY}#tOzYCCm-Q;Mh*^qg?qM;&=Da@`MUjC0 z*DxaVk?R!o!boEtRGmK6dImC24Kmn1u6t)@d-ZX)}vp3Sri>l>6CZTweUk5@>pPm_XQaiWSwT6 z0u3pLihUl6*7a8deJU`pA$Nye_{D;1ivvU%r{0FF0l#ouDN0gQj}Qw+Z=sqKrCb1FbAc zwPlz8WD(;(Qw89yJ?yu__Q#q4mz7mMrzsU#DGLK5`Yd-ZTPa~P^jf3zXj(m*F)oau z7!`dOtxTYL2Im4&kmC&CFkuqvn5meIf)y~*2`Jzn(z5A*aTNO^)hIm&1SQLXObm3wNApt=!F=gR;HgZtaTqyARKnTs)y~fG9q}%@ujLOd;N+SAz#yY;}Y%| z@y847T0Qmvzp?MMbdo-$6>+q6#;QTOC8^Gni-V&{)O};YtvQpM-In$YEPXjqU_7&d zkGOsaz*|K%HTF!dDo@%ECep)&y=U*?s5=n8dIx1;$c2A{S^&#e85GvFW&iM)J>>5k zsbU0g6VH?DAGR6hVfm3;kTp6EjElV$Si};byKs)m5-}7gR+#4~(tgLF2 z7qbtt`4(j5%JhkD!_#%`plBBFdlDQkM;%MI7jgj>8kfiHgx4*FAPz#In!-`f(p-qU7?vv~`BwHzqu$kzr!Jc-Hw*Q}r zTGdx{6SO3VI=5ge5;v`aQWC2OISm zQH_uVU%2@Qko_G{YIInx0>h4VdHFi|vd{*VAXCI&p@q+>8E>-gaNf!}86Ao3#C!86>%B;p#MOTMM%6Q&)4mYN zRdp>^(pYz;05*M(g5h$3U0?!X9Lb6q>b_v()ewqI`~4Q{y~Xk^P&WDn@^t(Ui^9ym zbzgQyp7}pDytCB1Qx>d5L20qwM9o4t2by6kV9`30t(LvSfskR3@X`LnE%O6Q9UE@+ 
z%ZPKreYc|v8mWRv=ZO=wkqkid5r%PBXK=vWl3esa17f?KZckPZI9y*ydRUVht1El;qEpGlPZ6KZ$fCiHHg z{hHirbCNIg6oeb*_h_k5c++ zpp2^52v$mLiY^jx<=~mi^;$KumNPmf@Z0dS`t-`~p5-V4Qg_^KC17SV1M|S)V+Cvu z?Hj=q6YBe%6%Xwm3z|SOk6^g)7Qa>oo|CU5$tZ9>N=Zy1XNg8wU1l2uD}E5V*tk%2 zf#V;+h&ML?;goYkAjcLKlNo zJf*`VFG3Pz7Zv+?_vwUrCyuhnNfx7F!qRaiD|1c92wHfO_&gr!8@M6J8uHGPY zKLw3P{Q(vc4{PYV@=z{v%1)n<-rC*p8hdgZ)*nkc<#W6hACpz1bPP8F5&pt=X>%<1HPPL)GiJ*dj+GRx1@Z za?UeyHjpk7j&gH5I)}>xM;dM53hPygPY~JR@uT!^gg$!si04q`Jx4duw2CL>NDEBS zuL`ue=LDkBtQ~Bm^Kmvb6}0F~zM2q0<)#PuwzrQzxX(|c{id6M;;=cS5Y7fPtfLq6 zt=t$1M`FbXjz$Mk+UCm~v|(jtHZ*=JtMQ5T4JqvA9Bo#lxtnN6=``B92=yiY*Dxs++TSJmH({%0TJtMrhc1|#Srp5ln`idQV`)g zKwP!yyWY2?8h$~p9iG<=LCPcUq)E4e=Wkf$r2IaC2KG3W{?GI|za#ZQ4aoQ`6t0Z~PEvJ&Lc z!mQuuHW6z@C-+_!)RE{LmBT^XeX6DkO1JLG?)BGcq@mm5#^HA=6EfFEf>-8mM*m?n z5aQ``e}nZT!3;-s*~i;h#echBI$ii3_S9PD3J2PIiRB>5_H|XS-eOjM&Lo+?S>#@jWD<(Jd%L<$B}Q2*73czf65EN0BGOvy<4r@X4S&cV zmT18@`kOqq6UWPgl(_R=8areo8=*`H9TL32Yk_dACqORpb%g6L@twHQkQ*9eu5jLe zCi91V_Av!mt@l5q^*BzEoe|!nx~uCtTf!E+gBxbZpwQU(09IYl9iR) zpib-BWFRj((?(=RZhikIp)MG@Sv|Fb$~2w1iI%hB6IMbN&V9FhzAs#~2=wZvf}eb) z0CFqt{kCygDad&Fe*U+na=Yg*?|qz$<5FG7>tlH?1Fx0>l?96VIutmYn(y*-xE6e3 z|2%d^P`Q@{rJ1vVLLWcfESxVA%y+>3>hgT#wh``O*N>SI!W;A^A z*)x6YlQxx}Da`zZGj{hT{v>p_{kmy$5?x=wFCjh+>*Vx=3XxN-q-_PQV+|HmcCA^5 zrDGC@MvyYu<)})txsq4&NIr?xAT-wWnWK`!XvC_6e3Eh*4To2|MoT_&4o_Ks{1C0L zl2G0{Ge>#2y6Lyl!Hr`O?%1~bX5Ol$an|S+y~|BZ!-wwKCjC9h2^|8{Q_T|~4QsVw z-K=3P9=VXb8Nd|3wT0#^x^pX>dnR#4Q~`-H~f~c{+?FpS9x^(r{I7jPrJ7b53IJdI}5ztskq=axmQxo-ZN=-KQjHQ;NG^evhyYWsGYaPS0g z0RCRYH(4p4$xaqb^yN|dzFD6*U`(X35*4%*kU1)x8Cy5cR$MmMMfRE-U(Ox0`i-?` zpX_6-va60EA-dD}eF4@X5b~0%g86r=gwGG+XQd?;*@B)da{RT-h5(`I!n=B-DKnbb zwqspE^51??;jT?&9*nd22(9~UESsv}@R)X~MUlG8V3!HQ>4Z08@XmOgRN5Zr?P;n2L<;fx_~B89dOWiyXDFO?P%KEo#ry4s>& zn~}Nb7gIb`5`Bt^H65DPy~W{p)T!^rjiEDbJqyMllia4nc;W6EC|8u$qO|qK7Sb zCDtwv$sOtoeJ1&CSx;`w=f58U|5uj}Mo8rw7~#PeVnJE7?ZeuTQHOq2#Sh%dK-7VRCQ{#c{}X$g;+pWdDxxG~utFFAPKo>eMf4BEnP~pF4w% zF#Mdk8n^9}p8HJYm0M;lF%D?6M5@zgg7B?RgJLzshpqX1s+X(`5!_K^id`&U7*nUe zYTI_io1}MPSYR>?bvvyduWv}(F(DYZnwgPc<`#F+gzsAuDAq`x5j|XGR$YQAr}lyx&vEU_(x?g``%9BVA2V+CWbp)m;W+ zOiflcCb(1T_J5hB8=ohc-^78jiYvUqlkpg%kAruyJ}xHedcx-RRNSNam|vLVF|RVr zx_l5R%azj`cNnhbmA*|ClzfKu%5+P$d7YTwIEf@cl94UHgc05hK%U$I|5Tg(L^c30 zAi&zEfXbaD(-a<0u{favP1oo1aX+^o0b;xe_%}*WK9m5^(#B?w&ijG0~m=O8~^eBt62A2x<}!RMXGtpvEIX zsjOE;L^eV;8hOwcVDvWG?3HSeO-bpiEz`DC4X8((C7kr+fy_HEV_^M8YJBKROnO_; zUl>m4njx`w8L5#^J;ut95Smaq)ucsSFrpcC=!F-?!^49cH7X|O#3AvoQ_+20VOSqW zvj^b9XA>RBSWl_(KpKpkF7cs{ephyA5PIr5dp9At9`@-0j05IqxneXY zJHT8>IV&KWR3`aJxCT0s!rcKYa=Y8&$BcE8>bGL@+Qnw5>dYLg+fM1hF(5*noIaKB zlyS+Y0|+1C9{|AUiytunV1h*@0YSbz-^qp};9sZ3(P zBNJL4*!#J)ur!!*Qf8@xZVe3Dj=puZ$AW2JR?$4 z`KX*3>dqrKV|XBa_=F!EpJ^(8EWw?NyqJ@6#jvrXIu8Hc8<+q3oezd%;(1c zG&}3mTI0@q6b3{GyZSnpu3SNt??S2?a+0%mcaj|Qw*v{gj5ubS&Q=g?$%*Z~)*ELi78n-8(5PtvDVHH@;on{;F2P$pPg-_Ht zu1PLI3AboBihUmNxA}&A-X02N$<2Iu6a5QBqh@RSp_~722p@&fcr4o5eW2jT0w+F) zxLN$}f$Fm)1h+Kt3{x4EXCR&>**gH7nu0QT0in`TI+fKa7~~M8KwF7FQC5Md5_^Ly$`v@=U69qQEF^ z*xK_tZs@E>gy^|>wD$OZTIoB_Pw{$R4DjEQd|N&(znJL#mKN@WbCJ0+c{=%eUG~fm z0*zj_rOp4?`x8n+gx*51-^x^|RQAxtKf6rRd;v^7irxX^vTS97`gteJPwuNZuv=9l znnh|xZazU4dwHlIOkF+nQmUpyn8BC&x|e>&?9U1)+T>#=C(5JMj1j_aY`WFmxh)rd z{e0@|h$#MsI7ex{+N@%}t4qd}!1^C@1>;6IdvfQ+V4z91Y7NI!0YEZ|G!=-(d3LZg`M<^G@&lxMuZ&aN$7aHE+6^ ztOMaAE1bkM#f{(_L9UldHfF4^>e2@q3_ZK3th3;ylKLW{1|Ixc06{QrE%aljn=>fR z=qzrYqbgmNG^MG%{p76YmMmjg{)sd_^^CtW%5ED?bOqUGOfV*H2a6_`R^V-;gv!G= zb4ua(9@BZoso2ls&{)3ba898j-R}9!6UtpcBfzipSON;r%sjEd4dr2z00~xZX~MZc z4Nud49N+o*Y#}pmBMV=VPs_)=4`-M;OglzrRPFXpoJt(4@f+M=`O2+HI`=oC-4yNZ 
zDHd;1SGX?~9yg@&?*h9>?!x`)%}Dxj+d_9a%vcgNZG|Zd?eU7PFusnh0RC@jN8^kD?g#4?9B`!d4~yhS7tQNmad z1&_>%XFMH%#8(L~eFxqi6Z~iA94r#I`j?GOVb57vcCS$%mb&Y9TAH})ed?32bFjMC z`K)I!(!c~RW(w#7akePg{3?c?o} zqCn@I0CDs`M}%D+sw%El!#&$y%rkFzsMW5;|2f2v={Gi1=N2Ky41}0V{49sLj_AHj zNV&g07BQAG#tsZa`SYxSoT0M6vfO2qMbapOh|eW{EwbTo0=gyLp+9(G?m#?L(bNhY z8F9kZ3|_-saw`Zsy+Y1B4M=L&VhDH^pE<=c+zW?sew8mZ1S#rf<5H`EP5i_?6LkCX z_d#k`=Wz!XfJ|(F{r=cfm{p};D_w_;xeC@rEO}Ay84w#`S*nj+Ox?)a?fG_QG*7jN ztv2<|ZPrIH-K2F}Xd=$N+5DR+*TyjsumB_IZTO4e~LuTzQSzh=rtc zt?IFB&U$-}d&8Kp>)<~d*xTx7S*rKB(#F-432IJ2j$kJUy)e+HNCTILX8 z{^RkQF5P)vPSOzdGx}36$*RP$?*L>d@5s`=)b2kBCd=4}C+m0crdSqx^7`hhM^S3Z z${U;oKpH(|c5shp2f{Ume56B+6bJCWQ`f}A5@fEn?lF-SW|GH#SW#kr%-Yt?W*>-7 z>?$3_Q2B(nDpT}v`5uj9DwbGBV}SHcIaxOn0jb3Z1eYYZsyE_-^%p*Mhp!5ajX+zM zIHI3U@!cwHdtbMAkFtv3?)mEX7tAEwBU-}+K&o;$vUmRj!y=E3PKzxB$cUi6Ct4kr}d*p)W`LD2e6sAAl1u3rpH}U=y9X`DjCJUGd1#QO{jlP@R=( zu{5ATyocx#^mRPZ?qN5I2!OJY`bgY!r6VJlVq}Hm@h|~LoyXHMb0Z_e*HSLGD9rA& zzq;po6dA|!0{NT${H1j==`(AcJ(LHD-&U^G8J6=b^-}93_aJjt1-oI z@r0Xvevtpq4!Vy6NbMyh(T(;$5qbT1g*#stLkzqqoz{Gx&J)i5^Wyl!2lG)x% zS@1>q)b9aeB^&3>4i=RXmxazzf-;xJc@puq+$5jH%B$=$wz#7hdrgJKG{ts_T3YEt zXS|m)4pgB{j<6bEjnhjKPabEB7nIE8lN)WxIf|KSscr?jT4Ho2*_BjsjpUgU1vozC z!m@CV>zLz2Y=UW-${Rdhy>39U=x2J@sbMLjl7CTyyFhB=9)m0z)=)m+8DJ0(zKkrc z{xX59Dk3vn9zOmR2jLi&lsU*Z>uYsQ?AQ!VRr*kNw4>k12=8@_J?iU)f~!7it|iZ| z%o|b-_|9SKo%y6^EbOT7lCm;uUknmu4+YpH3FLx+2w*X$RwYandfh4%*ok-^Pl8#b z3$?h z#~EY#r10vp|5?k{d|>gu%dnG&e|~Q%XV!v;R!Z(#uC`e)8*I+65#$a;^6eZFb2i1R z@PdS3cBm#&6SF66Kb!5xVrgF{aFxEi&q!@QgO5^{sRnNP6x6qeOQeOHJk~t?ZMO~8 zBcM%s&cx};@W8xY`Kra|8rJ2UG;zFw**=O)A$RDEP_QW!S>%|!YP-t=x_KG>&oZOP z{Z0{U7%d6CwHvg@Ng2fNV>EmjnMvZIE6cH=2b6w7^}1owJ4@?kmR`R?EC?vTsj4W${3CSGpkm$MXhLTO{$-TzP3@ z&}%aVxd!#fg9Alv%ryNrKU~eJ@_cO&9FvSKio`l+E7``Gew?x@&zm@2r;b0ri!>qvVVk^}}^`+m&pO%|1(>QAF0XO0QM3AsJ1s9#$K$X4uS^c35!GKcRmG zM~E`^fjmGBiSB_V;RqEH_db@Ulg5G9lH5nU;ISV@THE*+1-@!?7!9-YrBi;TB{cZi{ayk{-Tu+Jty; zz?r0E5Xms3?I~J`Z%P7I6af*I>REpJGJ>QPBD)t!ORIU0a`6uMIioKGiQa&D1T{TR zlRKQV{Pup&H=A2DlP zGEZ7KkOe|s838uBmOygAAypXgi3RMdXmZk^FX4_x3&2~&1(;j_w95bRwt1HLtIPD> z$HWF+49Pi@&Y?@Z+gC<6NtBhSeax{eG>t~+7)c8i3K7VFfG=wj=Zz9yna5hlNE8T z8fJ@+Kh+^3#ku4IKKEGj2mXOIfP*K*J;FzcZ$evvMQV(sa0*yOp10xVYjuKq?7qu9 zv**{H%)6g3y#(srg5curkaSfpj!7Rd?*Vpq__(tDUl=<`8h~O${0GIx>wNPI;RXTI z$vCeaL%6J8KyY-S$u6P)VBWGW*77{KueD{mG&PE}a)oU7h6e8Oci@TfXaK`^EaVKF z=D3eH2)K63Rpp~bAFk?h(r&H)!t$BVCaxvPhiTE$*VCS;-itQub~Su`1)NX*$jIvq z^rw*J_q+E%Vfqq-sO=mwqgcOkq{@FudlYD)I1Q>%vx#2|d0x39O8NB=M=Uo7wZ?f2 zlCD7LbW9esp=RUBgoml2u!Z!Y%A|_h9+N1EoqsCwAc-2towJm%hWiGLd5O@iclGko zsL&xCm;%9sND45y1tX4cdV_dWWv!1auOtwLWF++TD>aOVYZUrlt}nM_SgP4nu9$Zj zCb4@>MO8t#fl5X)kJ`+;?r!CioHiwnI%HJp7p$YYVr!bG)SN;_0Xq2)PMg#gvPW%P z{g{w!;{dc!aU4>Q^rMcazggAD!Eqim6pjAsSJ?KgV}F~PLh#YfszLHI`?<-dS~^)C zYH;Z1)ddn+79629Z!n{Iny>1lF*n10LRsuJ38RA8O8%NTAbT5y(OQ*bXb=db8DCbJ ztM6a&UHsoUy>86xx0EseTv80oOg?)2wR5Ng`=|o&u(FrW3=yNxD?gx%avD$Fi^U?c(0Z2*fL=kYC%DkQqY7y`f4 zY5{u0HQ&DggKWyb^Bc|+HLQRz9v{A*Ssm%xEVc6C2DN)x>D0X$1py@RdG z-c8Dcr`Uh1qL&aRQe4y(o!;MAWyK3(fjnR(sA;2*i3H%|`X8)aM+2 z+Ssv$f5EV>%II_kddBTK*1I&D;rInfV=&V>fGc}os8ZT+e>}}z_q(n%*uC}%ftbZ+ z5Micw!8{V;NIIK2D<ePQWaixJf)8VKazHB7hZM%kgW_T2U zO72&W1CGI{qRMjBx_4L+-Zy=lh~dtqZE}XOo-1A!6#@~tt5(tGD2bEmxGYgZlIj)m zqfg;ACxSE3b9|H}x?(AfbLlE3Kq#(1snRqn&n*p9*6D-koi7-eaW8R4mrMXIiUJN)rPYD%eRZa7(jeWl) zlI}SE^#h_m(06%$(~eZ_piIgB@<{k(W+cND`9k9#Cv0F$1Dvqe!+RXU`$-a9F2reC zXC9-&K~J)?dMY63)k;JMKLk}BV(MlzOg1-Wmjbrp6P}oBvYDE|+vIEd-0h|XRXUjU zz*{W7mfX4pV|(#op5o!A(#bU;CJQ zd;t|5VpiPpr#sk&Ltp-rP}%fDaqI$-c*wYF8$wW-{6lg00&&uVSh9{BTm`MBwM=dL 
zFNaL)h&QEuZ~l{xu}?ZAJWlzsGXu34!VM4s5RKaW+K1s{R)k0oQYnal_hmsOSLBpr zz47{$a8FJdqd&4`_1}s>DyZ_pesy;9*m|0Bc6?>scFKt=MxXO*rZ?-5Zcyp>;UU?c zW-v5nN#Af6E;F9e`i{g}6e+#=ZkbMJ&}APepkKB^AB?ZEKDXIhb&2^hxhxZUsvo*k zXkt9E%lvw_A~-&p)k1lqM%Q2^I6k7rthuDjroFctkB!D`8gki#^yd;nr(8XaRO$J^%NEjmCRw;6~J?v%$?=(+)Z57>7|0+ z)!v8w=ksuiauGlE>z?Vpd0rYnlJ|gRm_37CHGn8M-7E+W*ez4`fod z`(K%m;sxg6r=-zVh;p!5{J#DBL8r;~xA7bKw}{`@h}z2s$~2drw#shM|9-dwj6BP> zANFq#nq2+o0fb11JBgZxlkVSI-e0Hg!)7IsWN-h^Cl9-UHYh6oSEk?B7@*)_T}uzTG6cYPO|V?N zJ;^BWX&X_%?lB}I7MeWSV*i~U)dc@m$c=B*Rk-qa*MEgUDYy%bb2o2;q|E(qXZ%0? zZSa$yP}sQ1jb5O@!n3DI2|7`;^SRn1ThVoZH{)=w97AwrxUFkh0l7*J;ONaG(qB%a zFi04P3QK@9GDUk5`QWceJiv^NUTRe(-gdWfAgE+qud>}MR?%Z)mJg#dtMmxe%!!u$ z+?jz*bx$nGK*c$@Ts0;3RMQojm8=spCh+5mhwJZ&t#$T^wI1L(ogN5yGZG#wmesQl zLlhkVmk&t+{2M#&{NL_O2LH3g-B&EJlJ}mx^9V}zKvH0#t6nV0KHwJ0E&{~I)Pi%J zcA_$2sbpOG2{q>V7r}N0%yjC(I+hePF@%-AYwD#@QI z2M|@HvvcV-^;$45%d)gND~EI0eP-?_6yg6^cGxvkE|I#JR5K)wzKMbvAA0X>n!y{L zr)vF-`K(A}*9X|iHgA&&y@u7lsTJc@LM&GxE}nTRb7j;{`1z5c>P4RW0!v$_cc4X5 z=$_CGo=`=Z!3p(OaQt5Rq+)M8`1dhv$&e02dI28(d(Z}%?_h1Hv0DB)r0d&N3{~}m z^rP)T@oayNoE=?DWwW75v)O--U@&u_RO!Dd@y@~cg#K+w|Mk{uh|D-@T(C)(&**Un?lmwMk z@T!5D9GYqW)aw31Y8e1Dn*NsImoPAvGovOGWiJ07mc&(sCx7aGe~|;ywKOgh`V+cU)-@`NbSfxT>%un1`G)pyU^fMU?9auW7<~a-lC=f5d8+Ek z7#6Q462f&;&=OT>9PQt`p%@AZMN?8UT z{$>nu54h~^P5sD)t`E)>&7s;ps9T>3F`k~qiCd~l$K4rP77 z=}=Od0Ou3uDdWwSh^hZM#T>VJIB2(ed~rlQm4hE%kh}1469TuFy=rE9RS#=asiz^B zj$}Siz|#s$yJQ3oP7*NGZOSXkD@0?(HGD#ZT%Z3a(X?AH-Cs_{trg$aGoN%7pNU19 zt=tsr`OrMRJ(B&woZuUDB2r0|Xi@2xm79nrs@2{svPkV4%QX$!qO{1|yb@zro|R=EHY(K~FZ@Ol#R_)Plk=8e)dh#- z1&vL=dGraatf;~h-@GUI7I>hG#;Z^}VrDldjykedSjCcoXCdnsmuU3hjLpd0@tB@V2YNOd06krQnQQ`|> z1!)SAD^kMk-26_Z$$C|;)Xe2aA{e8Z#nrcGwp2%1SfvBV4QTqr83rw76t<%oV9hDm z;+TUKy)aqc?qVl&_c^419u2q%6T+v4@+in`m6Xm9hReUwhu1MRWK*Fb7i+pTW@T=flHhKJSPa6varls}B ziB_h4j6Gu-nnJUrclg<0r7`8^16P@g$xDuEzw>~cY>O{HzL7(=Q{^m^y2f4~nW*Yn zjnKuRrDG+DBLCn3I<4%#TYIDkW1lwUEP5f9FDZ_%Yt8|*pU1_; z14?o|#;MFc{S_@(rGze6FNyt;Q#%Km*fUxcl{wjp*7ZF~6Dp8l1Xu4nZWYJewPnie zkb+?x0Y6gXp_I(%+|kGrTKh54&mj*+X6ui^SHko|$z_Rv(R2hn;wK2pguA|>NyF$7 zbjkZVA_Z11a_&mkfnMN2SLNbihet&|V0fQkoLiGu;Z+-WC;Y^I42e7a_K7Kgm;Jaj z0Q?i}g(K!nmkg*2%S5J2?W~4+@juA-srS%KBX(;zPMnkdgc^@`GYNe8^wLDmd;`9W z+Z+H8TbDBgV?nV4+rXW^^JqwTrgXd0O?z@7@)Jr|!9MwRe#)^yMz528c!-rtGOgzV zPUe`bmsfdk3mcMHDWpqua=oo*_ED=B;MY0bU>ylM?)JT<^un#Az5|CY~=KEtA zcX~F^om*!(XI)cNYEvcEl8j@O4%KMA+-_nL1EaXtAq~z@g@H)pb=OZQvVb@?znbEw zOT@Spn~18zcHb@yS{$>cTCe2?N_rinxpU&AsQ{#MA2$ml)&)26k@-fWi~RA>_KLP{ zgXs^w=on_f4h1AzpN82cRUq7R8kMjyTwXVJ)H^JH@-;NBqSTh*4$Ck`IIgW{(}MAl z!}8)Qn6^7a`k#tXGD$In;#74O*N>L{ge`@FKP+G3EI*YOIiOzo5ZKpEu+vB@{E{)j5Blj5PJhCu`P?y6ct%X7%PCzoJxuY|zJ2$oeKa62OBCL=iJkV@C#=!;JN;#(RZgfKqk%VA!9dW~UU^SZgGTOuAQ^A|=(tgqAd zn~w(aDa%0Nr8zO3!~;W%3N#~qW(vpO)Sa7mE0g)LKcZ~Fgj~8fvV=s0ud#1his?yU zF?fB;&h5PT{7hCm{8^`Y$ks#EQtzmz3?~&I{WJ#J*Nd5{RBa9j%wIj?h(FP|!T2D9 zOI8+RHA%Zv_X^BwnMca{u&X|uRDaMc#oI=0qkM~Uw1&HNiCRG4SLd~TTyUzi22k}p zMgJC^7CgAuTi~?R94qvWF1?Z)CA%l-n2wK^1cF>bvqLe>LA|VrgDS(uw$d$LajZNI@>PSttw?E|mO;4L4&Rv?-b4s7+{>h2i$f zYG0rS0QQb6g#bc!bs?L+krn&`TK?HN4KRX*sGmJzHl<^wbA&602Ns?(v1+rhs*Rri zKw}W;gt|hh@}D3xoyNB~#QHy>01*~Z9`l)MJ+>CWaVr|mv_(DEtns^~S8ut&F4Vpa z9IzL}QzK|#sLA6{^VzBAv)b+Ewyw+Vht>#iT6UDvLIxt1AfM8_z$$%H9lnpiXl-i583CObMGX2~*En%#{52c7|0sWp0v<*3jQ% zK}Y+xr+3=56RYMAUn=M1`05TnJc8!iHnnRa!bcrKs_mw94cZciUk2GEhl{*EjC~-g zg9Mu=sUdFxg3UV$lUSBaepjhhCNY$mr?slcegj_#-}-*5>@#}q>?(Vwb{!DnH$Oq_ z?)3FyTI*%Dc%2hO@(79}1B#EFx0NBdpOwvAl|^L4Nrl)9KOPhO5}&p2JZ>XC_ePzT zsdWO$Uac)x&-Mq-*cT}EMTNeXz)7N${7Xqc)F(jNNCPvWcT)*rTM}{V7IfE6?|p(8 zJ4l$zO~F~W^eun>4#1y1RF+1njzs9wmSnW!ocki}eBur(M*%6(yTs9_hDaLk2V3=O 
z3U<~sN>2h~J#v3SA^op2LIpOHLpa@(B{J%Jz!THVC#9h_aU_xCm<4b`R5U>-3*D^V zTTkc+?B#@)`AG9X_8e3AO!At~YQs1^dYhi6q7tPMS6iCSjC0Bkg=WC{<@QwJRKnP< zdiRa=`QMYw_vIMd5acS3{F(co1Ji-kmrcX-(TF$@6`C}ahV0l-w%8}JmML1JVO{RD zZvSTb_ex<+@KU!FfmOY!`o5FA^^`f-cQPxc>(kp%y3)-k-#0IhEIbX-vZ!?B&6|;< z>ElQbe2(5Gy=pXyXvgk->eP%94kNW!ERFn9a7fqaywGS5cQxc7R|6Tr_{*fTX&hI3 zEDYsz2i)wKNmlSA{`fb(Y8{6@l4ais`)7$VFR?8qbY8JaOqAnwCsuuLb5#qUqkc26 zzmVxRNFMa3u&-l!RjosI_0>iTR>xFdS3LV{{WnUxY1Id3nV+$s`#&1&egjG;PJfL( z#_Sj@u1G=~H0jFc__sUz5UTsp>KGKkO|93DIKkBX|FHMoQB8H--Y6D8rCI1zL3)!G zAbaXiY0Y7}DUD@$$;8Hm@>TV-5-5_he@uHtT1@O(% zy_T}}A5={M7BZ@DI?lCjT|ly!9cw5#z4{_)I}1 z6D?X$=(~CSowI92+oT_{bn^mZ_>7jUZ?M<3`{l`;%vR(ZvF3OuK48|mBfqG-?bC9) zioDKcsA!Q`4^~4*pke&%@Nb4k!kQMGX5c*x6uFD9dhLvV)hq4+H2SjVL;m>OW)`LS zX#Mp9OLy0tGZzH+;5+YPSh;TzILj~jSa5bZOR&x+lRbLS#`ESbNwIwlx1p|Fy`2dL zp`4Ph;u$PL5Y?;YPq*4uV*dp*{LiQez&Pg)%>T?EJQhBbnG(GR z9#{=ABjX-*rr3+nRA=GM++xl~8sqW^zOmTX8#1CO`I@KJ2`=^`CH;DRaJH%186-;L zz*{Ihx2x|6Q&VRw!Lw-NnpgNpz$qaQn=%SCB{_KY6_}LLWj|Nf5C{R~U7XI;ihtUq zg$Ce49XGL{@tcskG z-XB01>C*&pzhal*jGcHhr@V{~YlQ70Yz*B;{KZb3vQGaj!~P2F`(6`)#9fKmE--yP z9#uQ;-Ib>Lanym&SZeLjpaY$2u47`F?C3zf56`RX3~upsKGT`9W0?(2rt>b%MHqFt znF%IXOkSmgtJnGr%6{&_H}yl@sJ#`9%7h5s>=FW?fhJYf-l<@Ly^wMwCG6>d3)8;@ zNSwZ`!|n@lzhn-Fy_dD>8~9MZz&7+`hhoKP6&5s~AC3!IPTouw7yisXbS>CxStUce z+46!igU>1E;*oJoOxlv?Yje>S6Dz_?GF+zpAV7D?Wkibe^ku25Rt9IWsN&269#JH3 zZFk_@y{8C0d&;=+`1`4J^AQT3E}i8|JtnXS638;3Nk(Dx$`xhqoK!qOMq(cP?yIu3 z#TPecSq9CjU|QXquq&|nPQ|V*F9IC?Uqo)-AQry*pxN?*S+|eWg?vemcH4bA(=m`8 zRIgw%;IyCN)wtP_^`~~_S@EvEc0I&)FJP~l)tD1phm0k~#O*y{E`7n+^cPW|{mKWADyLc#C!cTDldjoP zULFLq?~rS!68jAVwA0_yeZ*Dc?co7UAU$KZ&tF7Ap)W10EiCCHTAi}8?qnqtx=+jv z7XT7*h4Ine79-EoObe?B^z%VdgVDb&r{5;)-!pn(r$^lbYV=VkXn4lpN^Pt zG9MDXzmqJZD^Zc9jn&tuc}OlUJtByT*OHo@o{EE#<+D?v*ZBLc7; ztB`|Z$E1;VtrDOCllO@}tuC&@p`zHQpmOYEON>hrjhGuLgILi?fhyB8CkCJX2K@rP zQGJ`&))8p{^_?>dmHgdlXkl4@IzhEOoe&uO7m*?RRt`l_7Fl-bfjjMMV=4s&44pBM zl;!tI5%zKK)Zouw<>9ZtN&<@J;NP#fIN5C2NjRb&J0mVGnE5rL=Kl41`*+j?dQYG~ zEcFw*Z(z3a;n#1ge-XV!{{26zFYgP<5BSQgkw>aNiYgjuDXAHdn|95M@DuNop-UZJ zrFj1yXM&o&squr{(WBa+{cfW8wjlLq@etpV7*s{$bOtffQ&dmmuES8^JC`?lnr1`v zu-OPL*@CZUi0s{pY~eXczarhc@h-Q`-)P5Pk4ObbPZO`aq{1ljE%)2U&xxd9}tq@kG$Kmimef@Pr_*Cl#sup}Gnu$$*D7H0Zi042o~lA0oOjM&u9Ytj zm0|lhj5zokUNkL%zAtOCTX33I+Y9FvCwMn+Fy(T($#h+FAbxAZqyghP|3AV1c@F1$KULUClmlQU-dV)=9VnBIfyHkEk2>f`1| zpE9dA^l3(y7RJZ+I()WnNgOuK1>P#|um1rD?v>-Q2b@3NtJsUPVdE)vaUv5o^|kYP zsf4w!V*QMz1i^h^GZJrK^oj<;+*Pa0vf7^kDhQ3fMj&8@SINpCH1Hzju)|e{YIU+g zYs>4##96h%!^VN|N4K;ur=7uo7tt+U4Wcu{Nrj*G1)}l_cYh#wNa$xbUNK3eCUI88 zBnWT8$i=rk{On$qKCG^PFwOr~%6s0kXKbW4;GU(wz49`A>GkYKv;%?3m4-_r?zs2Z zl?lkfXbR?Vs3suQVM)IU6cF$~BH(WhX|&Q&Ob!m@=(rF&0;jmNrO%3HwxC}rQW>qb z;_tpbf+EOcn|s+Duq?c8+&E-p4(VK}hjY)>zVJZ=SZ=r2>&C+L{h7-J0I8mPhKzfF0}S<`bz5z)xNrPOVxOH>tf%H z0$1q$2g-dV_s!gq%^lFTA_Apk=DWu(gM8bfvQ&VEY24_;;gIr0Z@I z^+~uZ^V|AJ*vLpvm2*k*+bS78>TtD z%vz>n&}*0_{-sRYQ|SOgs6rmvIm213x>h0JF*V!Ft!246 zp#IEO1A5#MK#9eW4CC_!4_^*x*G|72z7@^B+yq{Z?9e;9^2u?{L)U(mydl8GVWix7 z`tvH!#gA844J5X>hx|P^#U8D(N1AXro|L70ogcw8C^ksV0JjL({U4XF%Mj{s9e1i8 zbOsfFH8LLx#C&YdfwdZx{p=#)J}B&sny>?hd1~OWAI8&s!j~o~LU!PVrXKxin~?*Q zHA_0Dhtu&*JF0i&0c$GG-Otj{jciWM+@1EIJXgzGeHkRcbx0;~jvj)sM(C>3EU+UI z2bjdu^CEiP-5d#hF!;h`Z}u}af$Tp%6}}0n3u^{&q7A+oZ=W)S+CV8P!6u2dF^uz5 z*6?40dQ9A{r5r5uKt4OPDixBFcJQqr{=!7 zoJK9nWpQ_P*^*5c+7z3CEJm^bNuNezKBvq1u4Cxd?jIvNPAIb5a#JCH8Hflw{+1q)HOpmC=>bf4)k&dsEW_K_E{d>W)h|VbY4Js@x*yXyi6WKyAa87fh znPc_Ik16dV_fDlL%$N_Zs6PTN7FPCFq@l5>#+jp@M%OZ0gPzgewN9>#CEw;Ufy1$| zI$d>8VLKmzMzoRTAC^Pq&xhN#^eM`oKVlH5fEBOu zroOw*H)}e|iPS?BPk@p$^ON7WC&m=ub|Fm(S)q_U(HYm16n%@L1FUG1WP&mrdV8&Y 
zs7+=1Ztz84srk6wCwCux^PO;ch2!lpoJ{QqI)LQVuMNX(~gM=3SL+ z(yiT8USS77NK}X!xmFv9W2?STJf2_1bj{B8h(tNGt45=DN~&k=PDxtN4vSLw(}Ibs zXonUcb)#bL9-xVKVKDQmkS^X(Q~Y30HD@sWlN>EEr^&ocY0Tv07vELG4bPMjTUEdW z$R@3BKK`29*1J51S*zWKvn0FRUJ!R>+9MPbj7D(a<`>^}9S}bz^+?bUb04#-#Tn$d zSNN)BnqtO}kDn2e?fh*O-(oysL=IFM`1D=f$Z8Y9dpNfr6@h zHc|J}0!k z+|J5rBA{IB6BIo^^srQCpAD4RGfbc|dnX{_YcIQoY2;)CDOAvwXN8z9znN`)pRi(@ zL?Kbyb}j4e>ReK|L^&dPbv@rO`I*>D>BH&yYJ|SJu}x}t!KZ4AYRo?RM^vtNZD#oo z$$msrZP3~5&5+OXv&g;+BM^faJ=0?|SgU+|tb7{wId+*hK-&i%(DtCp2#XQ&9y?~* z3O96i$J445KAjKZl}<`tb>-Mrxiv5y95eXM#Qr17F>3SkrDsB_I>%xZug#cPY|qC@ zG>cGqS&^9Z;`qvSD0>Z8<{eQ;JugG>%dh%EkIKtJShdLqv{$$4z7#ak(5SASne5nK zvJ3B?^9qYr5Hjcuh|6Ire0h^>&^+FmTkH;q8xAp^D!B8h?L#p#ctrUIlK8mB(!EBn zXQ3>1F$QzoXeCtppVpdo(R?X(#=gXM`X$ik?EyG^9XQqDLPj_ISx8mdDE2@!*}LVqxSab%4e$EV%l}7j)h4h{TfH2 zH@BR6vwC9$TEbv5vse5+S>aOHOQxF|uH@X-ijO?XxTjj_P9&12<>9eOIc3U^f@IDt#@R}n?>@oZpuK3I!CXUspf`%&YMs8(5H$+e3l3k0t&;0D?~=P_3^0d0g6 zCON4_rjM|d7Ho=xm&B(uso)w`C`Z!6Glx$zlcWtrU5rKED&gFzg#ZuOtC`f<~{d4bC@9rv?p5#wQ zB+sdzm0qi#wp@lb8j8dV2gE8F3-U!*I66NkGV)p1rxq0%>OFd0|Gh>Fm6F=XiWpBK z%X5-3{)@;sH9P~X|2pbehO*if!38Sp@#+LPZ9L2Li91KMurz&;AL1t zGkD(<9`3mvzbkqzQW7p$MMnEDsk(Kzu4LlED_(55_+VKf+IA|NmMgDLi5lXix3nH0 zHlnUGgLv;m%O_Axzz;;tusW%{7rbNRr=09$g-Adkr8&fBCb1I1I`<2K>d{Hnv09OX zt5|bTG+cQ(aX_s*mYE)icX4o_)7PRf0HvX$M;LDBk(dKm!_mc|pj|%aHx6w~ts153 zO1iQn1xGed8)^@X$_Qpd&YyRA@;;0`N>`5GkV(IfJSsy|*5RpV%31>RcFSK--7_Pw9|CRJxTQVMLdv6weplolwb29A_D@Wvin z$7cyc&-8IJvTsek+(p^G9g2ixMQWT#DtVLu=0);4lbq!Z`a!Bp zIhS(8^VQe&`m#fvaZlRp;x+euHsg1kxtD3Udam&2Jr$hIOo-Ik- zUdYBlC}`>79Ho~(zYOMMNqL;{x}$Jl+hHg3Hzwmx?cxIfCIN>HUc_RzhTBw!*}Xz| zVOISzyap_=q3@$ZU`E12d{p)_hR7zD1+Ytao9_bEPdf%~KGzokGKeKen#=JEM%Inh z^oe*2`|IiG+tBqJt<3Jm(+Qp$lxH*DrRt>y$kpeP4kdtGHPhlp2%AqZ?ovtTi8PJ4 zb{ZdaCgiOXi<6pTkvBHIzN;>@!c{#fmCDs+&@G!aHQBo8Hs0pwDuXMM^<@IcPBSf5 zrVxsFM_OQ$S^8?XUT5{5PkFZRdLzM~e4TN3!WvR%MaO72JS#d?NI2dr?5!2~f-D(Y zeKV?0k;&!cZ=w5mi86hFwENyJ0^D3PSm%?N;>dT&Q#w`-mVaMmm{82uZe=6?{AMh z4pV$BD*p5@B8M6KkJbK0xn+D%*)7&%_Ym;ORhxrkkibxQw zRdYQCrPTL59;Ybf4Sbj^Z?#R^KVQ86T>T6c|7OL=$M;79!RKD%0wQ&IWoNM}A)8uk zt~tlSC)ZTt#Ygz*D<2&3b0hG238k0G;dt1mE5 zaix1&=A^vdK_?zpELfrcgWepnCg4S4s6zja55J z2d9+I=b7MX*DU~^Hda_;qz+)uLTm7tu=+Y$4(+WH+e~vW^TnVIyE!U)w}4H77(?hsGeitzpcIAn?2fhOKpIOsp$GXiq#hG@*$9o1LGAFViHkz*WS!+#&ef&cA$>P z=6%BI7`>;8X>fsFtsRFCcNAhOrC2E(=g6Uyw{dZnX$_QOij|{ZUi(e*NF}6AT3ipv{ufc5 zJ&Ey?Wn;9HgIzTy;>2&C*aE)Ndq5E$e|Xgzpm9DS(53~I*B_Tyn;l zmd$hBeA=NusR31QRJtDF5+Ei2aF1Yld=2+icR4T)simiR*H)`!NB43>%4%kcJ)+pg z?rJW*Ea~sbAzEP1;qf_ z6RlW*XeSmE8dBb5SpR{<4qIhe6+s;-ND(6O7Gfj(CUG8P;{~5!0xr^=>rI`-APVot-4>D*{NT zK**2fFMTxCN&GXcu9^a)qj>b4a-Wkd^*E}q=^ErSfv(LmLy#7T z-?f`;bM*&}EvJf6^4$@=>?evY!3k@C@@fSVoe=QaOlTgar2EDil(!LRkF7^UDy}e5 z#K9_#B2MI31O{J>=j>-TKyjB0NkF?AQQ$sKYlZ-1gK;ssr1wCAD$9~AQ_oFw!uv_- zMx;I(-Np|U5Ch_w0;3dNABE~l3#BG~;mpjK4egP;dJGUK-Yir5UQ^x2Tem4^fPQ9j zqAo!5B>UxXvPY}IBwbqc9E(v9G&gRJJLQ&W!$x3ciSsZOdC(3tNL4S2e|R*j%DTV*l1E^g zCOlA8_bGY0(O&QP2s&Gi1C>o3QSQ~|037FcT=cGhDVZt-UZgpcn#UDDP$`zL` z$b_h_0o_@8I%?I!@FM3sZ>{v__44|)LN7-&2f6bJbDIHkCa0^@F!?uDbrxFmw0+X@ zfwq_o`bf`L+f^k?`qCxyBPW25K;qr&Ku3qA1XV?eY?4%XsTtWvs`Fu4H$2f8jWWHY z={MZ{yyv~(6S=_Z!WI9gS@*LI9(vycK(h&xbhuHcPmVl=8Uh+P-g5P5FtpOB`2wvCropd?#4tx1+qP7mTGt<;(AiT2cIZ|{S6XNEhB$Fxkh zJcFmm`Z!;h+I)NJ(NQ=KM=Nu1`H1cGsj?PLV&x3i({)GM0WUYrkV9AuxX>L5=jph+ z3Ls8#%DojKvo?vx4S|l@(p6PC_2nLFy=amz4dLg3oR@`y6G~^8rVixDgkw(JxL~j} zvvJ$!9Pou?HtC28XTGf9DL1GNj+h^f5R>U|XvJJqjzZb4A)-IT*jIIZMorV$+Db5O ztno{0+Zbgn1B@`waKU&dddl0t-abfImV&O}!vq^&NB3HRL^Dm6^(L;^q2>)rgeHg! 
z1P@P;z6@`Ixz@ZA#@FtQ zlT;h#rYRV#^+tn|PVd-Q_#0l1{(e&Y{&hsqt`2VhQ{=m^V$V?~w3=ykk@mB}swMNO zcc%>e*Vg&Dya!tXL5fwjgcm2!e771XwUQbs$%ZK~80$`MgX-dCq^*gBt(zb_rMS&k z@zeB!mG%h3`-tb@_CTPCkI`qxYJ>X)5w$6AyEU{6M;4b9daqxUe~gx z4O<86%H(YD)tUkhQaNuR0?GzwvM*Wi@LD3HM>RA9Cs4Y%pOC(jiXPss)G$L#aL)c;@#7FGuXO)WihjneQGNJc) zx7cFBl3St~Vt9!sB*&hMruRvo#Zj~@trmEF&TV=m2H1FQT_V8_U2=bHV_Nx2^0!|k z0l0+i_uj=&rii4bE+x!MKZAlrIoy^n{WqIN1z!* zq998xg$&Ccl2TI~V}}HSMsSDEdwbRop$Mq1KHFi8sL5@6t=I#E%$iMH?BYwwRC}kA z(p^t}J{N1=xx<*702^Ta_VuTjaqvm6T5&pXWOfMKK9+5E zmhrADicx1?KNk(J$dcVWcrHrc_Fku=+(*vrUX2Vjtg4F7-sZK~K>lIEyLP-jwf^3y zfr37l)1-1lghH~XfBZXR=O5Lj^T3eam}mW<)OGFK0uC$$?HbP;hR+g$>}nA+RC*}X z$rdbbheacg4kpmDcg}&aZK^JQFgq^k?+B@rH?7||?Jy>olE3yW@MizaZ|$B`q|48> zWanI#w%_JE$7w5&jOJ$Jc@jujh2~(Sordqbp;kS#11_UI{~AxV4n53lp+%^ZgIen+TpZUGZ(Y#0O+|m!%?bQ+kuLs(44d z6q$BWfsL-_C9C?zw3p)f|MwSeA8TF+dq zj7y*SdOvbD^a{ZwFdS+Y5ibM^7{B{Gfw_U@AtqwaZqIV1Z!VUnJC$#p=i$JB5*-bvdhGteUqmryx6?(Pga))7LTpZcWT%)VJ9TJAh}oXZ8PY2D zj^DU>;up>Bz(R`9q~KKIL-Uz91O%!5SSkPQI2pGsn^iy z(!Yo{yUUx>XN(zbnw{%Bk0j?ztTr(zP*vLD%+*!kNXxclciA^tn~2 z5)at0DI%pCR~Tf(;$~dXcWaq$Vr8B^WVOS^aT7wVwX8t2iwZj_M;vkLfCS0d_&+)4 zENUlP!Ff1bbR$_c6>p8}VjUkP&_(lfYhJoziD#s-d(kCx6E80f(biX_|d6i}P zy3)P8fXwF?hTSM1{zdf0eu!yzPSatZb(qTz5|JKi5{A_OjO6vFU zX*n~S%BK26Fo-&FfQ(2^Gq25Er(I5SDM5?Yi1uGi6%%*jS@QM4s4Pt`{;N8O zi_ZIiW+^~LQe9hIRV*-dho|9(q*f_-RAlV<{=8|zNw1VO!I!Q5-Bd9$MjaZl{rGO+ z`1A{o+*UntY142n#ladT7cxl@+3m#5u9*UPF*5cyFrTg|N9bo|ZM%t_ezKYISQ(1D zH`EWO=!sR1Eo~Cw<}HI@O(A5G;*T=(ZMSL#{vv`aXr|#WQ1t2E3y#8SLI(EKJ z*Pv&!H1EUbEv^}f+f$!x`bmcF!kYYwtbp47wVTROrDU*jWdHlG%l zhSNUigEcd`#_A?GWQyCHu+AuEIaoE`eI4I+YUNGf;858juAmLiW_BSss~Ry5Kfcyc zEHHaRl>Yn!tVv1;qQJbho>b6oij0(EIJAZh6=xrGfg_I_B&iQ2TC=WaAC%~n4coaE zI`{v$B{tM8*b&e$0~^KPf3on3a<6UHAM2ag2|k+%qZDA-mi+pyWQV}mp4ll^6c5_? ztUmWyeL>dnkjvK{8l+MG)!fBFi&UwS*duGA_^S+e*X2ER!cIEs92|y~PZ1o-)HRr? 
ztbq?yh_~)U4RLQL2&!5kx4&6!%DO=pJLf-x<`-Kjc~9FCY38B{h_eQ;Sj?``by_8cd7Wkt%IbHTcM%z#J-B{>^H~1G(d29z}h&&>ge= zen8;1vta_qThq@zMa4y+{phOV^nN5R9hW1(K#RXYN2DV;Mg#YW;}maV^?_ZysXy*81*;(;ZNy^ zfEq!xBXiar%kA((Uw*FP>tg%-=P)V)Q^3!61D=*5@TG|p05@~@I>W5&t;kt;C~1$v zg#suX@EQypBV{jc&JZw(AhJZ6DS7wVz0U;=(12WfEu+gStrmc1xiw( zU4|5UZuIwBe-@KsD}MkLLIWZ-_@FG9HRsyv*p`u6xuM3Fs&34Sdv{g3V%;~8!fG*_blaJxq{Wth*$zx%L{?u;r9}$zk|A-k(_@b^HW6r zP7dR_)BATFi99an{J*Qq?PW}s{$0MAfdzf@OE}CZ+#B1A7iq%r<9ON=Qk=QJ@!arL zKe6#^x4NdS^S+vxP`7#fyZJI5=Ry26Tu}^2Z%Jk4$h`4p&-@&3VkBm^StXv>SPO{( zY4YzF&Gd8r>gxMjjY?|+f1Ha%+8(fc348s`h9K1YMRMAHpKh@Y(`U(nRErpnK4jfPuy@Z!<3dJqK3?l_Jd#s_~l!Qn2SbN zo9ZoH-c3c+ToZaQWCqDL&myomGNL-A9>&p?3<-h<3csZGhfeCy9rb+z>Y`$MIRWE_%PrUK-07k+h=%jNk zz7z7Ga9HE9$c$@6@0HgX;XVhfJ3lq(IqU+?x)=Zo*#n<-6HjO{nT?sf7o?<%QYA<@ z@@4ezzL*M$o|&kswgw2w$)MMT7QBpIC|Kc$_;0|xLIDr?0=0FGt4^=t=0u~QZf3bo zz!zH8Ee@alzfP$A(%CJp&R5WPcb;E$nmY*Xg}H)*o|$^z+u*HMY$v#If2@fV8u+uaqn+rddSY+4DZ*DpmzXlt|M@jHsW z0wFAx@tsdpR66Re8FWh6>e8+_&IG*WHK7j%cNVJI(n^;}gOm()K1C;CM+U~6t$il5 z6VAiO^LF_F{pW%3ze2#eKYWdR=Vxx~;Fd)WF@c>AT1@2Md%n zb=AQN0?qQgw&|lhD=~&W-)rLgV_~DVdCN4m55bDI0MXq3z2}DlxwlYGJj=UjuSE;e zjHwSqA4f-Y8jkf3@J+8RY(VO0#J^o-VW~(vh%nF_uN@x(Al_Z##ygB*1!W;FHeym= z=mnSgde=2n6j$D>H;-hdh*n9Y>dU^-9cmx8LLqI}L^ZPmMUPC}5BLP&9ldDy#MDqb zCNyEBk3N`GYGhFPDG02}mIP6p*@8>bB-!N{ui3zMG~5$Xz;F6*s%T+A8O_p=DcL+} zDv+LYnefk1mh{yF?K07Hu}I5pn=`FCMe329tl_ z)+2gFV7IYDP@C z$w%KBt7`VdZdA(@jTn$_8?iM$4qbkFn%p@-!9hUsC!ToB!W!}PH0@6kDd%LBh_>eVDu02N8 zR!R7#Qh}xKvQNBEZfvSE2&yRi)vs{eb1yH@HC{uS7w2E|pL)+*m0$?6|9_FBV)|X7 z-5Vdh;Zh>;BkEek2s+276W@5N6lDfBWT7&%y7SB?E+Zue|He+WtIzT-<2W;Bk!G=|({rUra$L(aY1@PQ)K$HP>q)~Nm>N?gQFz1kHx^z05W z+i=;34k3Cawu|UK_BW;=roC5z`t^>YO>-Ojk-+p7RNRvEoC5PyaUa8-q-3APjSper zXH04Id(zT-;np=ufl-TPHT98V~}Jajuw z6NTeudDyKv+Jl(_8%6^K@=OiiSnOE+?0md4d)90bi+&xfIxccK??Bn1zL)KM0LmnT zeeF7Pn5H@ImRhG{6q#T(XmpRE}*LZ7iYP4v!O|^MI z|Jyy|y&F}0boXX_?{%!+(M&d)XfnzS**Pg1!bXCDR*9#w_c69MSJuAlLK8eU8{Evs zhiagz!kS>MgTe%o?jSrZD4a?h{{dg4QfyUv#u4<%H|ok-*1HfV9!GjF2o?**2M)F> ziMvu-XI!~soArACT^E39jsT9RN@>3NbFWL`qc5OO+!PmI->GtFGaYvKhT7dFGGT}2 zqQE?CR(9WQl(Sb--COAjMnT#W8Z93i=raznN0A2z%zn|s=}Sgkw=z@V;$-rjPUM;G z2UJ0b)c)~+uF-cg0|fm2MclMJ4d@8=L#oQ}^+HEic5A1fl|6MLh@`GE@*mklkf(9d zAY3uXq~v|Ky!ywGhe)MB;3zf{l9@DGyJ|gGIn*+zg+#^kT>R3k_1Z8iQZ-RK!ZR{w z^kHZf+gW0}JwN}ky;=BC3lh>Tn;4`MrVC& zjoL$}>eq-YESsuN(|$+x3Rx(u{z|hx5(r&RoUPu^iy(aePS^E3Z_xX@?5}i5;SHbP z*?wjS6P}F{{*%|}tY`NeX^8k!&uMaF2#b1}eClLn>tSAJ;%w{!4}l3whhPeV=E}AI z#+OG=6@YjA)3u+)md-8k>;H6SNjl8P`(KT9Z-YHgn*KEwR*U9;q`f7yOjP*iV_gE< z%0S1^{MA#^3l3en>BKm$~n~mfyU@e;)E=FV;6G*$Z%3aEGD$hjDgExeIZ~g@E&=A(KH7^3sk7B z$RIbA$aQhUWUPorqE7#%3e4qZ8=U6yKv2=H^yN5bAL?~=^ObuqVNNdEm@;u|!V$IR zF3Oa5%Z?u(wTO0&z{xyZa2wVD6OZNyUDKb_9H9H5J=zG z9ATV5@^mt>XPPE=-iL5{U88c+uluG|a?KDu_}ED5h8%o=JAZe>lNJ&K~1bk{+D=%4`x*iC+9L<0EqU-&VWum&wUgyj*ME>mx2;2;{5PxQb>imOzK}0x zksFV8{r9X1YL8y8<rUS4M3HHqE9T1%qrR z=QPjKWvX!A>h25NlJ6^}ftNDVFiVlh61dZzqL`{~d9 zv$yqD3hCuKv8D~iCac1@ZMJcG$Is*rOk)gO=_y+2n;Yx8qcugExp@D00;=%e+M8lr z{JY>EXYHkdgCDWr&{CV{0$G8|E*9HR|J;B>s{O27d#&3I@BOdzOF!~*B`{=wOt>-0 zc-*>2w)@a4h6YxRDLI$CpRuJsM(b#>fq!lVlj~Cd-02=8t$r8VusOg6X7|e&|H+0t zM&4P;`BUraG4inqw{c|?Hx1TvN=~M~S1E%0~qY~ss*YB%8~ zRPs;#`?90e#Q#b{wTw2=;(j$dCC3KX`Da(0B)_1wc})A`xk$Y=g+zV7_v9il^^&Of zfY%ZnSJ38fTB8mbHq^>0v6abB*oKFFQrB1D@+MhhS6t#pFQ@d&Rlp{u+`Wx*aC4!7 z0T!3?4=bAb{8j(!_m?;P)+V57sG=SZ7BP*kcP9{CkNEl5@1yR@JA9Q&wu0Tg9Am)3 zy_k8KqN(e2414Bk+9mHyvnad_@Y1K5szA#T$Z4 za`BPP;3zdnW!cl(l>ME7xi`$_+wmHKy;9j=7ByF2_KjjGhg!mpGvu)|R|*M$E8A7v zeQ=K1IH40X_Y+%szNRy_wJX21Gmsg46h?D?sLsP_M+C}7+a~aG zJa9iy0{QT}v9t9HDe0{fBubryU&*UBK};eGv&xrYQgY8|M}gQt8{jqG&p~C>;$37} 
zbx`5)qwHZl*}&>P0m)wf&jmRhF{h2E4TR8YJ$}7Vwrb+G{LiHmu!A`y0QdjLYyIrs z%m1uMGVf%;7S@*f&m}YH9g;FA_oEE<13i1Ri$ONt$~%VyAAzP!JU%IANzO+jEmWOu zbl#@eCIIjgu2R_}i}8Qj{T%@9Ef{Y8P9}VpJ^K6(Bf6_va32@RaX{#i4sNI~C+Yik zq9=0|SF+}QP}P^rF6}o<*_usFgy*al3&~w5N zG(#rh|7+#5VAolM{e_?&XBD^xHN*X3E!XsrO7(sA$<8&kWi_9a0Rfg4$6s3}$5vCr z5a~abcG1THwlHfY;$lfaQFb5Wr6w^;06gWAG;L!ySn(!AI=S7Ar`E{$`6~;dcR;+C z3`^6VVJN?>^1E4U6vOQx-cJq5&M;0Oyp(*E?pgLhIQnUKe-&Bbz}1}Ik}k&G8S=+P z%QO_3twf>&BmlW&0nt^1cnwtz*quWsuv`-}9!@PPJAFhUOI@vSI}zwoTR)x``~emJ zI@`1Jfh9oH8mX%2vClo09`I?@3aeG)Am4hOjrRbYc?Uvb#Z@g#0jxsF@O!`mN7kCG& z7$_r&PrA?((p~tO?_bq9-}uaCoI=J&5~Tr5fypvbn17b!SD^prAEhPjrfIwwey7nXM~m7(!z+K#1BZnL$?o~|SV zyN=@I+N=xoo223lVUEO#E43|=maf=rvNa3x=f-5?Vb)=k*AltY0FpqD`mtcdW>Aw5 z*{vKO3O>x}K-U{j?^~r@7h&GP;U%Z<&_NLdwLXt2y}43V15`1(b{iQyCwd@|PCqtb zlskzyIOl`Exr2(oNm-S3m~FfY1CFCyQCj#$#5_#d*4&9jEST<+rTtzQb%O@wlRGXAx{H1uo)`KhK|Hqi$k#lq8b zs?2N=rT2fa_a0DDE!(0fiXe!9hy;luIW!0~Ip>^HlXIrYu@w;zkeoqsY;u!>L`4A! zk{W1oP;!*0h=RXCH(Sry`=0y$|M!jY-q?&9i>g^wbIw(3b*-vdGho|P<%ufCu56M3 zXv&j@C64`i`;oRS`X-ne>DtI}z(3@CeQEc>8Ov;GgtJID=XA$OJ#DnrPN5EIA2jA7 zu2|~@Y=Q58)SKk#s8`wp#6ME7n7Hb zb1avg<<4x|Xaq+tP$Pk`ex8n+sL(=0bgCn6-8oCi8*A6I9)}Q8y4H4#Ys!Tpz9N~V z`3Rgfw&g@N{*y6e@0^26Hh(4Xq78moJSR?J`yvGK8(+vm!kld@rS5MGlbSh&|GAY@ z>FY(TyVzclA`94j1;@8rXBqJKc;{Jo?S3j9yxoa8(K|5}{kiEJtBNuR=hk17PjdajA~b$$z<@Arh1T4$bo-%C#nSL3t(KwTRR;>pt_?ZCY3!b;WED<1W$qFOiOk~u!~ zk*;2;lfm8iI*yjK8c=WJhWnMLf{BlKT9*{&(x8qmgx}>Pt;squT9a^PQ<{y=LUN5` zRTy(L)^*;?M@@?QakhPcTUmYPfC5atDI>Rl5qmSr<0%1wlmaQL+3Qze5!SiE*#B}p zRvjfA*t@M5+VteZ$i=4|pq)`8}9WY4;e}U@}m4Q z$jBG`)5HJ3^McDloJMi(n2<%P(&lw1l^+K$$bDT+U=L$|ow|pwYL?o)gPq~Yc7Q~k ze-F8(LZTh8b!nzy&YN-~WwO)B+a-5vQDN@b_v+)d52Kz;k3SN!zUey3WHHd6%XTUGXdFVLGM`?*Oe6 zDP%zdiBpm;^#^5J%I-Mz>s9^El?}CwcUNip*B<6YG{UwS<2n|O=K%I0Dj0uzviEHF zt>UZ`cS3iQRMMxt;`6DwWYL9Lnf2&TN}7yUUhn`U14=QhfFs~%`_YTTd_hmi+@l?sR=R}aAfe5LGxZB}fO z-aNW;`4YR(Ypf1a3UZ_3B?VE?!`jhgy+uO$mT*5T0%_%Z_pjt$GR5rEmbnQBpp?!MF_ZA=Z;`@3J;=#i87`-fUj^ zaivtGy!LeF*Hv3!{8y)VASf-UKJ%ga(jPCA3br>La*ONY+su7BH#()3my$-u9v@E~%1{gRO% z8TF>G&Uw_Cvzs=v>jypenudqr89JJdNI{)bI~VMs=nWO#8EfccSkDRq-+WHE;TvaeNAFi=kokn#DYi$#;?~LzPl3r< z;s+0cuaN(3xTOA1Wle9wg-B0zk**=c^J_NLQ9j}=aLY~$+?jLMch}DW>o;B|6^N)* zhCEg^3hNP|)ax=av@Cj11l2h%|C(wY+n~ReDbpQUe*+*ZdD4GxgtRl_o$dBP?C4hA zn<=}^<2u#gy3Y%3Rby>#y<+5YHCJ}qBTwfymIMYGknxGrf2WQ6FXcMNbIj}>mmDKA zRIBJzL-nwkUsJ(B(#%UHzYKc@nSV9ghw8B^naV@)clw;pVn!CrT2BcdR<-!fETe*1` zuX1A}5zxa$uP5{_7MWrR2wW*nmTTCEf?o7Rq7jLiHIz#a-q`pBrP315t+iju$gEVi zWBFyD3(^VoSbWB~F#zY5k?eq$bRDfQfRU14ytP)-)=_~*jJbm+@$B8onktS0NXn*F zw4{|8ANdn14YGZ>X7CKhYZsh6yO``lB@Cgvy$;9 znpDthS-cQf3A?r3*J0qvR=&}F4&8y^ zCmt;COXEIg7pL&mdp-eglKrx`N-W5>Z?+5`os5)l6wo@4%}!F|I)Hi-oIn%$eoKhT zDob_fNL!(=%Y8zw&I8b6>8?^@21yB!kzIZKrSC%SK8_eXJE;MfEx9=V(@E77k42 zf!0-TgL$r_I2@oim+Bg6WS~>l0`u5CVuR;e*OZFg{VSU_GI9*gDdU;lY;m-zoX!j6 z<1FWx22?5Pa=pJ*w}hI5IrV6g2zE#}W-ZvmeI2Cj)T{q+_GLhM$zH!ve-~ibWc@_q z?`4xhq3L+=nmehoiV^nf+Y1+pTMO{=F4*Jvb}Ba1xR*K>+wAz}ndfsq^+6&|*U@9C zjD)EJ{|r_mV`H0w1_{XN6}|`}Ju*?$u(OVVfj=cIYxsYqt^{C`D9%CQC5^M;GiiDo zdlY@B=%0$T@Y@u;zH=ss+@-jfY`@h@{MIq@Q8a-Agk;2v0=v`_dV06a2j%j{gCZ=>2 zTg$E!_wOigf0aUNDU^qISzpywrwTyyxkIk@Hda|iw3=+{!mq@sEA;lMiRE&0e-&Nh z(X?8lQ(`+TD^6XS*hkC2tnKfi-D`Ya6Q}~!kWrnk=C%sQ)hdb0C5;ottM-!*>9sXf z(7W;iJhZH$1dI7H{*B-!>!h%edd!T|{q0$j+Om{e;leYz%?>TufSv-Pt_6vYk(lFC z|O%&^8%-|y-u zzc9^@Z|W$|!QKzwT3Ci?(Q=8hhp4y*JT7@|g^WW(FAY_hAtQ*de=;VtA7jymmKl^l z3gC$s;~nY)!*jD622XMIq< zYRi+ey1d7BADXE%`ck41r2SZjuTCAC=W)@-PZX3wtmoU(OBRuxX+DauR@A1fLif%w z738XYlkZ+mhSZJS)}K@Jvw3{vkQimM=~iaYQXscsTc@i$6*`Mp1-+~p5y~rHKd#vJ 
zpkp}e%i6_AS&dyH<>S9POi=g@qJ@y7)*rp`<920z)?IMRej5P=-Ze2lLw|Wh-#rv; zDdb?C6Gz?|vQg{-{P<5R!zP$VK#!|Lg06usV6jA4<090*P^ekBe~v73hL7n(#yeTcE0Kmxb*Y6g?7(ck`k0m^U9yAk zD5{%v{FtSIDZ}O-pNg`q;1SZVGRbo;|g z2;GoGv#43)xfGL=FcY*z5J7OJ6XVF9=hz7w7Q_hKB;jEUzp0AW`piu^v8mkB%g z=em}4X?BOZM7>{QdFEP@LW7yqD$X>f<_sEqRG5w=`>pe!KEjIrhzz1FcuOHuw}sD0 zTEm`L#1$9`3fXP1!T-)T{QY5~_a@^Sd#YU~A9X4GrLGPZyjg+dE9*VaM|?#10sQwCd|`ppUKIjQ30Q~rHDisAe2Oqf8sA25`KP_=e(8&}e zg_G4G+sL*uO)4WSGm5E)k@_9K&hxF2g7ykT7E7Zzq<;1dZPLSl{dqRudok&wYPGkn zOO+hH;?Cu1x+~#2nB}T&Z(CzQ z3~G+9bxPwzA8P}-F?7ocCF0E8=FXJ90#5?=BT6~b4+<9eeJu?OgQ}usZYzKJ@Un`t zCTwnZQEIX)xpnso*&BNk_o*5)F(0Dtg)VGIS79X$0;$vkC!DJ5nuBE6LLB@ZG{Mq+ zcSl!{r^bJ^w-tF$hA!YV`Vvc+_GV!M%$Xnb_WbL1t_d8X+y?cTB1yllZq};7X?BR8 zUpAmr$mTM-iW@Mn%=k|6E4|SYt2o7Xlm@!E?8OTOyG5mq4}a<&d}e=nZ{5P#94Y@R zSZ18kz3CYSiZ0wtP?C^5!jmR3r?DCv+2)r-<)n$3+Wd z{%7NNzw|>wiyJRm2nI7dR+*Z4QKdGGI^VpxF`C*XLu{{&?t~;~Vs+?q6H&=-Vs+%7 zhCp}HAx7x0sow4~f+JDai@rVxdQ!m#qp;2?l=>)RMl_A@4(fhhLCGmQ@=atv;|?At zPBOXWADw^TJCk5fyukHGk&DjtZt*W|cw*j({Zd3`DXCJEezNm}4fS~x(pmB>DuA%d z9wr)m#a`XwMR6*94G+Ggj)dWmlbtR}K=!JxX`HBsU@c3E4i@R1Hq~x9n`%U}R5(*V zr(|$?HAV%NV|Y&2;O*kVr|1&E`}UEeD5lL4f z-~JD(jKBzmA1nmNKD(qlB_n>pZ5h2S<*yr7jFe@zLVI#Z*Apktg6Tx4o6J~07e@i( zyn;Gnc0dNs9~PG@m!+Y;XPTs34rPlQSCYFKe2{BZ>Ps%q?@j8-jdlHRq*f>x&Q?>8 zmQsZjcs=86kseVxUi90Igle3OO*_eND;bx$CiQ{roBZBeF}zAO&DISn?W-=$@R*Xo zsSc#JF-On7bE(E|P4MSQ`|W-eLWnk&qsLM4w?$eGX$k6+&RG&d$+F`5?o_jsd8=WV z#gv{-XgU)qXCE-Qu1Q${v)WQ{?9S(s)EAAKdy-VRYcBO8?clKoHKH~k zZGs?EjTqoWHjQmlWf}90*SSlFNYDj*9a*d_V-i+DToknpYKzOlp>|bvwWTO8)wfuT z%QzyTAqpUf#+Dsf;(4b#U=mKdP5mBiLy*=qXWey8lQZVtQp~WdY(L3%GO^MNd=x73 zNVG`YSv@Cx=MVE z%P@ZJ9m%Fv5f#k!)j2CVEA88dmHWV{y*fc$^f{eP{W=8xdhTdTDG#iz_&EcWmzRB1 z2Hv@^3}meNXM_IkIB2Hp@05R@>9P0pXs5ekb#exD?T59}K}!ZY3~Bt!8lVOPt1917 zi0gb(eN?rnphy}avD4*>lz5%|YBv4vG3eQTL1bB@?u%Vz?Y)RY` zzdP%N-sac!fVE2O>h4(^FjPKOkKT_Vs6V&k|NeRY`uy(}`ObWuW|*<4D=FVmpaNA7 z;>sL@OO@loovaJK-ZY+AuTeYACOA16)T%EXv@P;F?rmMF3O+KF8Vn$;wWnbl`$BQW zeT~iDBL|33(1bkhEj-%G|Ev2=uHn7YbzE5s$krFF_Xl51zaHSAdUY3jtQO0La0rmC z=F?sT`&l=IR!h>0yqwoCd|T+Kl^@Ibu;21ITBe$SI?fYd4NU3(gb1K`G* z_VwByjp@I<3Q0fj#bUw*qQK98__3KMClaCUS)kZS?ST`m?(o3ST&5L#b?VwjUM@PH zSLd7{3EiON<+>z(?Ocra2b&bmW^ z*g!OVmC`e1LryM$Qz zd$Z!$JubWR(C9Zc?A>9+6__#0sz%wBJl zUK%B}Wc>~%H0s6$K=C*;ajr`7H=%;>0eE^{T4JGxWU;WQ26 zi**Uj9zJS9R6xc)>a}iO0F8BE=N%5+9}W+H)FBqy(3ei@(`oe)Z;XQQY<}Y9m^3;mG~19*>2O;`{P;)>@sOf>hA(mOEGS$PQJpHs7B##T7N$xflwkGRvZM$+1ihmj<9v;?oU z`JghlP-)Ao`~+rRTUa;6f+%0*tUgV$)osF1WTz~f->7L6*AO^+#eXY`4r77~eV4NN zc<Wf zXIdmE)Jt7&8rrsj54C+6wV=GlY%lGyG=-q-^gszGew6E1J@9nhcFOGOkqyP+q+nj9 zu?{Q0y~rCM-KDIx1iD_1=b*>MiU71>!i}O^Z{?NaQxt47FwrRhkxM|DV zj^3qiwWkxv*Y30qwOl3l`NT7pX6Kuw7o-0{@S1_=GGo&=0C`68(-7s=X|IdNR8zv$ z&B~)6rYRQ&Ck%}^qY%PB%wU41lY(DlcINZJ>PRgHWcf*%wA~mra>%Znx-Z8ibf_{9#q|!~8|5$FmLU66v{YYaY5K$=A8>8=T8f)C@F|9Bmwy8Y{j& zFv(=RPC_v8p2bPk?hg~0XsQYRR^~sM?3oF%+fZB*_|-fnBxd{4p5~$-{XTj4ujVZ{ z5>+@s{?mV$;UFz?5?Vss-G8}F>Tb_J)fE3`$rdn|K(+|;xNoo-%cCFWL5||=KQLm| zKt-#a>>E?gJC}f?$TE&}vDK}=bXZs_;dPip^BrZX;5!N--i9)&R%qC8c6rVC>wmGm z-1^Ya!aMv!(O5bzpYt6@rZgYoFm8UW*lr!gSBewX|6D|EkZO)9F;R>tnp7N2dYPW^C*FZZY3G(OAW4#H^Gsu zN`%gxa|`26W<&j0*IP|A8h0EN8{}Ln?@s_q)4uVOw)#{pjTRp3pUUGF)0HU0l*D2y z+4FL8I}UjZ|IGb zRI*5fAQ#+JOp~R-n>y(wF`rmfdz;#G44lgHux||x zmVBxts6&G*E*9lbgfhv#noF=_GBAT8w*d~iP2O*Wgy^3k4G0)7nwS1JEAqe8VMKZlTSAR+eJ5ARL1;v)b$x_C zaY)X&YUkQULynk?oNRBgg?T>Da{rU-MTI_=cYwo_q4D#`;M{lORF^(76qo`AgzbYq z**CGAh4DWJzc>rAzn)Kl_pdtt9kpxP(gf9Q1MQ7j%OojZF?fI{p_Lbg;GQNVHO1_=B!;a8* zI_w6WZUoGh`TdoVtQ%@o2|nFhNG}Y_hr9gg`rwQ!xIFmAqpK4zFtPI{n%kW>O9rJt zw2NN0d8Ff<1`~!2hNTK-)_d!Ej2(q=cVx%e2AEKelm8)ud*O`SdV}Fg3EWB 
z1ujq7*~s5TZl-Ywd)qTjsD5-xDV-SlMZq+omL2wt6N+;gaLr1*S=#TN}Rb}aBm^f3Eig%ySq61&EM^KG33=U(}4ZcCpDw^{{GSj z_ebts^u774-j@;Hw{#5aMg6cE35qcx5A^Z(NT>d+yeDr5(r39n#aA@!!78HKx#Ubd zJ8?IcOK;x)&c+BmtO3AKSqgSJj0>NAQAOt>zUA$Y6OP(`+ivyC2^(Y-+G`WxL_uc! zpo(E-i8Iucfl(0PLG8GajCQ8aPsspOG``B*mgWqQT^+*a#KzpgC%2ihWsWjO4)fpU zi3$aV6LQK(wlsvOdDdd3DoVL?(5DP`c!iMTR&~`5{}|8t8aBBuSRi?^edZo*U0wEM4r3uwQz{RM#hL&}G$ae0a87tYJRDpdPo zrkY4|ZXj=U24=+-0G#?K#%5z8re?simR4TFSXu#vLZ9Rqtg}PXc99DP0&hMA8#&YB ze^ZHz246Js{*!bwdfhu!pY~MmpBfsfoF(I@LbcMU=hWG1*xfB=tCVBn%H2yM z8?aE!?1mRNP{&?O+j=^qe|qctj8lPQ_hCEcsCwc^?WqYf_VJ_shZnO?Pn#BK&V7iT zCyagK%|hd%Wd7+tKc|~VD2ct+Y@tTfrs5Of&Pl=RjGh!ick^Z(_ND6ZfeeSD#j*pa z&D6hv>twT{f=b|J z^R?*u{A|qzBSu;r;V^J1zx*4g~iPCrK6UOuspSvzEvpimahUo{Zg*ic`ppd|a^41l<7CuyMw&MI18T(xHd?_*7B0~r ztD^(Kc$Ox#F>m*N@jq|$`xx0$@hB#a;zOJ-bF~#fFH&_Nbi{Fc{`fx*fBcg^;$Je( z1Lgt8E`t$BfkwjMaIp02NuLY(FP^%$)Vy&P zZ@Z@Z&SvOAf0W3JyN_=tfzF$o{^5<#uSgS8OKt<9XK}?%0!oRax!e_?4CQ={kRswI zu>bYC6l@8Ck3L8Sy4G&;xEXFA&LNNEp7O`M-yX+aGnrNY7VRlZN?EHGzTTVsaHy%aC$&7vt!6&dwf9Aq$?MC7AAq&d}c-O&L*~oa8;l10JF)ev^ODe^see)|*P_ zBT`~bx0QEan%e&ZQ%_0XyU<$Wgk-Em&+Ks}1F9@^8-9ix+P){KOTe;lW2S!*d7|(uMtF=S^tuZ9)fnFEWL^2;jM5KDRcmg$q)AI1m|MnRgFJ*0ca&#BtSs* z@$T}rARvQNm&}PNJiPCVLW%eIY1>Kmvl~CNFB;$kwerpS!i#{VSO)KjPB3p68ESXY zS5!Q72^^VY)&n|f{v4EaPTDMp>itmR$M2KMR+%Nb~;Sv{FRluy))B*I1<7=-cgOc8o{U-2P{DYB=?fGdW z3{hz&#-hsm(u4m=86G!+wRdnpt<1#3ThWjH7r^%~z+v1EbfTzgjZRBw8I>ID3x3nq zdma)lky>#ka%FaI0vb62zhmjUf-L5K)E$FDn=ShlZAJgwi19SiSAfqon0>)Tm+z-^ z0{3MkX$X8Xu}PyJsSs+z@`>%L!96)zOEf!G3Sy>pG;I~%i5*kSgG9PfQR~``sUB`}_J#Oa9IE2iep=ihVk?U{usf7kPy*@a4gp zXH9tKmn+V@Id523q51bxHcmI`dq26!%(hz7D2e7LCFwrxFER~7hs$I0cwlfJK^6?t2j*ASjZ|Wd68I$rQqUUYx((}m%p;7Vae6@#&yQQiF)d#hJ4o& z;?qZi&cNs4GnxkogQO-!?k6_w%3^m@{xR&UV372I^>?Q_-#)5v0&F&-(rmv6jMlaeVs1za&VeXs9L{Qi=PT5jIBz>y*Bnfo<{h%){f@Nm=yyO|6sF2u043 zWluN@&BzI*b-m@^Lz-?P4*D^upuIO|&2D`)M(*dA*R{O9Q|8n7`&}$W#(=;dAE%BEsm0zDQygPRsBq2O}Qc?xE%v@ei zkN!D(lB&20to?A23$M-RkxY@&D=J93eNKJ$N^<>MQ0K~D0Pm*9Lz_QS!}tGd`2hbd zi=mZi4M2y26A?zPKBni+(yQC`?>{=fY67VO3vkt~d?bB6_MJ6Eu74QZjoGydrr{_=-+E2lk4+;!-gGq2Y=H}M3lxL zC+>_-Qt;Mw#rlu6I}k_c%D01uFip`vcGS(?Z$8KWpb!&*ePt9KyOu;;{Vt+Avd-w9 zr0xn(Spatv7UWGPW;~hJwxtpWJwNCf{v7wu2H8|t#!|<2c)l>-* z@G6ysKCO-iD0SxuL+H<>_l_eJFEa;wVbZC)HyrsKVvxv>+`I~Qo=m7^vc0XAxeilo z6tk`{*YUhO?9J!o5@$RsSBhq}hjydquB0!zhk^qY?z6VIRPxjSa~n4 z9zziqs1@<5|9|*4SgY?se?6*tR5MQfyf65E`~1t<>-O~@9-SMT-k&}nzIpqr^P#XHx0?9nDkZ|lH=fj^Tm_j$y(Ld<9IojZA(Ky{imN25$D&7v5waR%Z|zFpeyh)Olj=>qxVB zLforO%1AsKM|$9|rsT|H45C94cRaw1C!(ak{9g)`-(%T*xXUlr(qQaJ>)M?4v`%I( z^}UEA?m@wV(dVhJ88rX!V-FN(Ueccx?#-+Z3fvx3VTGT5iT5F>EX z>MOJ4p;ecAJobO^_7rSYEzusjyTe0CcXxMpN+S(=5JgJ5Q>42=LOP|p8$m+4Q6wb< ze4FsTd*2UuALsj+GqKj1*>h&E9`n-Dr)v=a2!QmbEbp)3oeV}d9tdc)vATo(FsJyE z_3&%t*0?Sd=`bCrG`2kUAo}jsI_%uQfGJNT^Qy{zDaezV_vC?G5I#s*9ddHm`#hYJ=IVzwIZD7 z>`V;O$6Sn>We5PlgZii1F-#53-ur)Z3e%Bv)C}7yfg(|Yib*@Cwz5BIfB_wjPKCb4 z9!q+AG;%=jcc=Y+^7d%+r@$o|1s6k3I+Qx!Q6>Y4x|e3rf}&t>ylZ;6q>DAdFU3Vt zhT)GZ?j3xva8R4~FSx+ii5U%n#)21b7y!MBASKCF8nXPcv83Sa`ixo)OkdN^=uvLF z>->AB=Y}R7_Bf8SQmr(>2xWTil4hok@13U#9~nRI38w8_Z`zW3A!W%3kyc z{O_*P`b0^Ucd8f3M{8Tyz4WuYr@}0#kjZ`GWjTqYaXs$LPfZozmRtby#~%pPLRO zJ{z7_WBjUQ;Y!5T_@PQItIC$RL;7X-gbk$mdIMq5Q2_8wCVBBRYWOiLkeq^;5M0a- z7Fs(Mc0qH}mH}MhIf6$|NrP^uG4FfDt*hL506`I96jT?PUhiZJ6)6}Xchv?5j>J+7 zq*0S=0oJ@Hb_n~!8v7XPKNA7S1_n~w^H7=S!D(0VhK@YcERZ$Ui8}clX>d6QOVEqs zY!Nu@Sh&ai)-o~wHg5=VPGmZr7aDPAPhxcScn$w-7n1BA#%BB$vXbceq~IGhZP_P= zL>slKfZnnXNn3B4X};bJTT47&))G?Ml06`GD7CY32A1(Ju7=Pj$Xw>mx96`W(s|xG zdY=!*e;y(lxvoyVwB|h?eYqJ#2<7|07T+nA5}Wro&gXWSULXsIIX0_*#2CicYB>{% 
zb2RBR{mJmLR46+G4W}R7p$qb&os{3bwOJ4Ly)bB1ur9-JOn&ao;Tbf}402besOoJ2 z%K{m4?ay}LE|NUU$)q24uBmIHnY3>w($}R(M$C`Jd(35{Imr%0fS@8I{b!rd-+p$r z>~d{Zc?jKaBxSP~3D>`O+Bz$1ca=CNKa{Y3VOSb z)lD9d%~4DxrmOA~uZ?2Z>`_p@594LvE8KU_zErPHL;z0 z<;h&~mQ^vYkFj>K=IDp#YGPuuJscP={HLlM4|=Q8=yx!^Td%8Ew1afU1VI^J^l;PR zhsz2F$MWnM{HOFDuI4Ni-S4wzi>|4fUsD>+LtnEs$9CM5OAbwm(w^TrkyY0NmdHP# zlBnN-KIUsp;PR6g|}lTngk`Z?KPx|(b^hfmWJ zO;G$bRP_wB@XbZZibhXXE-L-E|A0bw-iTu9PkRX|?-29@I;%-0WIAqLP{!rUu#uJ1 zocR?QhDueu8O`JFOV+zbC;J1ea>wCPd}r;;d}LItlaG+UiXf6c2{#D-Vci-{=_Agh zkUXl+HJ`Mmh7An>I;bJPEJot*eg|l)bTTLD ze631e^|Qgh{-xd<{Z^{^=qZ@QN-4c)I!kPcO=^ey($^H7xF@SG?rD!4sjOeJC(-3M zY#s7iOT1@N#Kr5O*tzkFU{ZEHU7-Qn_TyZ%#@?>L-z*Yt5%%$}539bZ|I7T1Pvx6n z0h5sM;;%LMXUEPUFL@WAyjolxGs~aXG8Hzo#G5;oC+pv8jvtsq;@zC%QMtAtr=t8r zh|<*2%i(&lZ61u!mTwaq2UqL_L3%4AjQsT(m~ELTNru|Vse-!Id_=O**?^o89B39k zi5Hw0EjD^t9((M(4xG7nyy2m(8ll0bga$48@?~_$Re6AD(z_?@@-0j)lrKsfe!VK1 z%TyOiojM!#pAvHu0rE6C7uj8-D*isE7OzA2C@lf*^BW4PY$#&T6m(TItzfb}R*w_c zA=D661EJp>#RZ4y50+!$FX;woe;H#d-op^32+&EO(tC^DaT)f>amv|(g>%}~2S=yC zUCBSH%zLdF0`?C0-Y9~**H*2x;DrxAU{*s8n_K>x-bc2PVN@pBWaWnY~f|i$ULZ_eN?RN$t%kfDv;>2WfK<4<;CScNe?VOn!Rv$)bRE zEzWIJz_jp?ZQI_0hC3g=Be zIZOZkVpz&Sxwq`rfsy^L{X|ud;gCGAH5B*bshD4qx@7bTlei74pI*TN3k|F|DIq#B z@VXJ+4`kKuM8ca?F*QG^5V4pgtl#JtQPqR5BuKB1>CcV(?v4eBo?#A97#-~>U38VG zr6C>xeBVRpkF?g?Olgn=bg5Xw!-hXHs29wjiRVG_L5Z*b99tD+Q4K;}LL#ipJw#9) z1Cv>?4Gk@E$J?h)3R-z^P6x$BL9ck9tqqip@>3#>LWdm0IM=U)yl0N}Zg8a1d^v$g z-HT!{iiwH}CE&e2@B_pD-+B(~7c)@$e{BctE0QHzZ0}Yf^Wp=akjpU1jmcLR_(9cNMCm;ieF^Zo4|FVJtNhzSnOO%{GAah~=t|*+? z-$jmYffGnTdiUq3leQqVd{;VyG#NFk#aMoQ4jY|F$jT0Gy6!0nO=X0?QXrR)=;+M~ z*$+QByfUQQ1o(Bft>HHmcspQ%7&#_GPs$7d zY=}~xAHyLSpy>=I9|rvDLjoyblx6p%oA)2k?K)i%oYEiAz40x*Ne>F+;fT43de8*= zhy4MasJ+S@U@<5)?+qKaxuJPJx@gaRI9nggoJPD6-~{{f$4mFno%vi6NJj2)Mu6tP z-*evYR09r1!Z}Ys;q}m$1m-82>9bRNn!-kwX1Pba`7ck_-Qjy!toPtyUvPx-dXyQhNM(^77^v*zZEML{4FM@GhY6E569XUD^U2_q-}o-H%_fV% z^so4&Mlvaw5;=!{J||smq3n4E0#=3wca%zi>7`br$i9r#&o35&_sSr)w31`j^A>8!;+-+s*uE@l&)MGJ zz1_3a*Wh=jCOd$fd@LGFK)JqlJB7u*6MZ|1nIlpgQCkp!tXOSeX{m2Q6XTBIgwO+b zRgRoWqSJI14=;Ww1VJZjw#{;9odd6BHTYg9TT&g<~_Xg50RREqqFe?WS3H{}?c-9-Djq z0f}P#$8=rsTxBJ_L(uOn>;cjuP6I9E|Mlw=*vT)+4-(Bjdkupdc7|wN+X6SuU~1iq zxhAV0esZ#7rQfPu`XciC@`O?BMJoCY?SsaVwpU5&xMbp`x!w3zp=%4yy`Ht~Abbb92<(vg0Bv%t{s56wMbv_XlpC@LS2k4k$EP8g)7eHe< zFXjSGla}QHEMXlW>q(@LknI`JxFY91ufw-W0h~VA??##WKx+s`Z^Tsx7oOqOpb3=o z$O6z@?J}|ehJ&7L%Q{y!>A&t$cB`VHk945>-Ed10rwn7$k7|mY!4D*OUtXVtYRZKZ zQW1yL!%e&!mOs{sa7y~I9%Fk>n7sVa%2*s(p12Vt%-;!_1mR+Do&+t2>VEAy?kU9I zHbuO6U+_dgr;5+7LqS|sNtMi9)pFqImzlk{1T%J-DU!0Q@rM9ZxTjecW_JRT;6(gb z%B^Qc*Og_V#$LNdhktHq_Z3~umJ&_D1>|RD5O3@I0Bgs2%ETqnNzRyr?>r1L6!*l%l&digQvpY2_|Q6Ye9%fB0N9cMMmdi-7shwm=LG`P9nqsoV8n3E z_cK7FMFsAm#E6Wt9!AL2!^{Ar(0J{P7nlx=nqmqgb2LoQ=&vAmz7iwMh<^F92~WJs!;j?R ze?XI#baMr4R0Yp|V*i*zw?rqk$1o>nJ-Ydbw^fHPxg1eRjZ*v-BB--X2hqy9y%LY`Lesggns0O{ zcKsc%U6!wSSZ#Zf)UNxRpB{ro2R)1=PEFy#rIr_lr<%u-tgNmxgrns%uhuTP%Esew ztxHx?Ee$shF8D_0akiBd$Rf;;keo!PW@4IyYnIfyf30%ofA`%ho>gO zelH3!*u<*Io`8y#LE#uCov@0enndqHHNO|Aaf>AD1h&z-ObsRP3bM2k7L-MUdyDzP zpl@8S>i&R42;71tF;|Exa`}k*LWFI(EA!wxv*<9;@9lIL5>B3Xhp}vOB7TO*opek5 z#zcMz|L!Ojj5vHJH;y<%98COlGB9pCg1<4y%@pdpM2MA+pGAD_KC}$B!oE9`94&fa z{bsucIuk1)zmMI%xMwrrT-Fj&CZakmJG~>%_SrulvTj$>3L&oKFm~G;<;MSfB)Vdu z%9v^NsXK0zg2XD<7CN~ZOw>>evkd*&!;5S6DxI|`R8hIYsX+!_F}ZQZs(Xe!r=};i zgQ7?!5ZCoQ$W~3tHYztKD#0?#RT#J1$qH6j3L$)XN%%k59}4ht&up(GC}J@S&!8_> z5d)i}_!T@UbG?!~Qk}2DX|MEOnY{(N~ zn#J3Kzn|=da`OFMlw3Xm_kR{eEDNksP8WglA#<9Jmy}lWk(dXP!0jM(=CrnZUM_^& zgqWCm;Qz35?~-WCwbRx`VwUsQ7_Ts}d@>G14!@%^_b^+#>jA1OSQqzqm;Y$BzB4V8 
zpe1P>7}iHWEni*TXM3GYoo&O9Az`)oH48$PSbeV|&0$8tAnNM=>8=o!Kat_rrV75> zFC?{gqq;bSnePZjH_*SRs{7(Fc9oVzn6h(nN1?p0SsjS;(Q-wnH=leuFhp&DuP%!c zED}W7k6W42yLy~A=~pV{>+VXU{7lP&y zHpemyj&C(ID<_Gsn}I&WGEOxjg?{_oev@--fluJ6p_YPdDZA-&4f^$$utP*skR`_A zY{ZRsvV?HenX-YINT9coAP#86L#}xo>yb&u&Zz#GXOLH_76Z;t#0rv35|7AI-uNT?@gK1wQs3C^Dlb zd_^!5$L>A+kxT-=KDsFFYXRjugh!1I=xBObFoU5N>DiARa6w|-#D9IW`XV3Yt1bL5 zfmw8D1|R8&J?my5T5BVk97E!c{G7z78(ZJCEQFXAt>j1K?M!Oks8CM4ax)FGEbD>R zK@9}*@M-OQ)x}OCUVBvBtQ$CCG^(seJy;X(WQX=B%xWak*|KR77l$7(}3EdZ^ z`l}~H-!54%&NR1EJPk<;1@c*&=OaEbZH_n}p*U%y4gM$te&OCxml1?{5#jmcmCiEwLvz2kf~AzaOBaZjU#MsUHXrOgH(xh2fq5+ z8ztVZbQ@-#eT)x1$M?H{j!;?_hH@Lz=XN7|*|5AC&iBt1M;tKK*i5fHTJ35Uvp(fU z^on}oE2$8XeaP`79oDyI)(lJ|bTdhbvfNEfzci}w zBs45I!ZYAP^0C8Y3~3NDu~5ZODxK1LYkmeEr=EUTjUI2%MQV{bBbk1w9)J6!+Hfh? z-FF7XNsL*|4o*C(K*_3}nbVT@qxY2>vP;}DUTgjp(~Lk)K0DWZt^~!400frC6y-hM zk4w3jmRGiM!01TeTg~JQ2f;e0;h+|aQ}1BV1b_h5an-!{vTH*KB$n$}{CN~oDg1+S zR1-EG@BQ>p*e-UEoq!g(YtqOb2)V(D7iuq?u-PG`UhWX9SS`svOH?ca3_U|i4P7AS{c zJ=I&M&l^`h>nIYCCWxO}l_0k-A2Ikt`XGB)_Zu$P3w3uHH2T%=PL}EhNwRRSIW(i# zK%lIn@9y9iKMK3V!aBK*kEGe@AkAqvQwZ~@#ttqimjI(197S}74)`VD-dMrsi*in{ zULK2VcWt$&R{)&gk=7NRlUJtKe|8+&Aaw9y21)~T&>X4R_24%4A}T?P3X~j0xE;Qt zVWls=kbo^g5MrxZH&&a1VkiYP@_g7>nuXK=5^$d+(P@*NX*m4n*2-u;2IibCEdmAs zd`(v0BgAy30iV!FuHR3Qp3K0qqm!>~V+5%qI=BM)g$bz~Vf>Q=j2J%Te_E(CVZ-wI z2ru3_ez67B-__u}8ji9Uga@9ljTZN?@kehL0Z ztOilF=gh+1D67Nb32*;^Fsu3xMU&^=sEp(gE-uIZRY#O=O1#b}C|3pFl(Ls`lC$f@ z!!r{63gl{`Viwna%DReq3~&=9-VowVFZS;dKoy6v^%*toS7%6 z8-LUy+|-7tW<5@z+8GJ5W!GWaX#+zw@by&a+k$E^B~`~QT*`XoFK>=$f{oDVVVBRB zk{!F5&0sP}|M4+9wL#cxQHUp*Ua1{M*oLV8R%dHw!0sPVG@k0%MKDS7f0@i$dmsdE zeWL-^f=39c*H7P6F-!~Jf|R)vmS(F`4dOtHQVw;XCe~vQBGv9h%K|AYYWm+ao9kF4 z)gs`srsRE+d?ihHm?s)fz}MJ&bYEkZiD=CwW;qqa5?0R zWe~Sk-IJuyRAKLXr(% z4tBi6&LZqTupJDQfbyMxP06XzWwPXde{-7 z!F)(>o7J8X5a>&aFc4w;aruWPqm6||zDI?LoAIl&e`eBL7H$e3Do!lky-K%q6wlz( z*(mk1vYuxRrr4F168$4`V+}m}6pp=z!9uC{2MUTxabinF9R}j64#of$YPokHm^G2$ zdRZZ{cT*t=R#!*h!%h-)D`&emQp?4S{Nqr#kdy_kunKm@_K=SlnEFz5iF(!edcD;= z?^|De7mP%A!5Bdv$x^8shl&ug^;;a?-}b&Vy0bt2)rq~y2p$WhiX!NuN0;@>??JCl zV?Nf`?^Lz@BFGb({j>jI-pyH<)^bMw-4l7WJwVo?(4{&wD6c8mxfp0w+FJ#IaETE7 z_^{NDD8kv%l?T;S2(lC3>?$c0=E@<1Wgj+~dr;^;>;-%W&8~#+DB!Q2+l6WFk%Pn{ zB)!2ONy`>VLbF!Ik1Rst_2ka~KqTR}?%_+oS6<*&DX_y7vaQ0K%aN$r;SH=XcT z#vAu7&TS&AlV7&Uw;#~|ty}r3Xk|SJ&;z#^wJ}mhW-$YlLDD48^gRffk6|maj@JEu z{%eTSBI9B&@hqs3^%ALka+2@ZXETmFl7m);~z0sHGyJln>RmYul$Lp#u1z{<7iu3AWgTjN@8G*pp+ts ztgvdAYJkhXrA#Dt+64awMg@a`b#10-qIm+|jTXK_%pK@Ilg>IqGm~lnjLk2|2qAgL zT}o&jq%21kRT8UTn+;gSb3OgJPkll(D@cV=PS{G()m&ML??zCg4A_zcXz_qDYCTev zUR9DqiFsgB?cWhIKk)~I0mMmJNYO{-f3W5$q^Y)-q*4CRrK)AMY{87G>e5m$6OhGM z=fvzxDrZy+4G&)R-yLIWq|fjFmK=+Rzg|Om?|SMs;htZc%ej)8%K?(Mut6 zIlA&b!l0LY=K!h=bNTfhuIXIyB5LY$G=cnr4g2pE*MDo=9DW*fynYm~_OTKe_&LVD zCAQTSm}$Byukr>gVW~{I)uwj7MDn6ug8=`Usl8r*Ikh@WSmxQYzBGTmEs zwhIr(9m%l{v7505p*zwXrgoyB@UBe)BksxuHg=K9@m4d>6yIBIgMUPxhzDTTe;_=j z&J>QOvm|BjW7-}@g`LH}?;cVH(}V587_P&2nZ!B*5P<{Gf`Lz}6Z5H3X`V(PI8w>O zTHqxju`gk~pGD`_c2i4ETiyWB6|6cbwzVLK--38T?p0!B28%C57LgfhEz}V}{Sqkw zsj((mOza~!|CebzP+348qFNIZzbV1~BMROC28}|z`+%EfoAYq$5!F%M+_hyJco3JG z|Hp-&D~FaL@kq{e-$_f?3!BA`wD{5>Yc;D!$k^`>1X5xN(wL6iN#Twl!Q_O{tJe4p zM0LHk3VMl2u3%O5(oaTaUS!VkD4Q%Kd%WYW{#=2KP>EGxN_) z-Pt0t!0wNyRuv>5!ewt0(_H@I9XKKnv36t8gYi4@WlZK;cOt_V0=-z3v!}f+8%q zW~%+7gzy2>Y=V~~#uN5Kcl9ZfgL{#h8w4S@H>{@oh{@m<1Bk6l`el((l+jj|&0?4H5UF7po6`CjzFmnMU3r~do!tHv#c^-GoW zLZ6b(E<#NuTJ8=$s*p}^JNR|!)%ONYCFqd`h?Rg{^KdgZ>0tA_{63!DnVXHm(;+rnV`#q8qyK#M1b9Zjj%GjtOnwT=c1I-)Flc zFze+ znk6kE@)XX12)~T$C>~4Nr2hO#`8y#a_n+O|BgX%co>om2ZsC8vAWRBAO<+LMhxPz@ 
z@4UWC1@=qe=lamnn?p6mmR?7`GuZ3w<>o%zp{4>;+$sfZ|-xMJmh zIxS;+xmfwBcFFWLntmu%ow)Z^2$SD>h2yC1u~gI3kHsnqq>UBZax7e1#0`osS)(6S zu2tz0?y_`U>X?Y6%YE{@4q&E=lV0Sd)IzGRXEEM_Af--`S7+;5* zCq%#532tD}<}zD2rIq?W@G5NtNU%b1-$7o z_QF;?5@E0!w9m;;;QbHXT90Xl&(Ceyo&TL3-iHK~kT!#HKJuCTsM0>du;CQpX+OEK zTmUaInOb{+;ZAn87%>~PuYGj%>o4m8>vT@6xb9@h8Ai6sY^=4HWO2X2@7I%CQPzoe zHG5E7bf|whaT~B`dCZ$}^JO$PfBB{s#;g1IoMEd(ocy=jO=vPImzi%ajWM{olr&Q2 z-wi)o@$%ig3KfWrX^)q$;4(2LKqFf-Dmiwj^C5tl+5KdNqXfoYwxEik*QoS2!00Bg zx`e(cPMaBU?jg=Lm73e%z>JC%6kP+~WNw zFGQ1e(y()AHtwt%X?B}XAC9URO7aypwU9?wvDGn(N%svL2}zE030NMeQuFw|D-${# zvF~{A-I=$YnkbM#wf@}|qeNj_>btuHC8ai6pmD^AF_g$7&m@``YV3@YDQAH?JGJ`S z`6`s=Ns%`+=11Fx=60}Qodv~+AL1^ZF)x0)};n2 zYNM*iO5VCorc-n{3$?M2wrqc6X6JV(K2q(JgF=#m7PEbU$iyEl+ zliTNl>jeIlX{GCFhnYG%zdcT1La2$PiW=Cl<>Ag0}m%zYSFKHyPgelI4 zNU5dTTLPEq<0NAtNpjRVi$l4%?_U&(HCEq5We;^S3R=L!9kR!!xJu_ebFW!w)th+= zG2r};WLe>gw-bH1#f^*KoX!7YRy?mkqoGuMC$3xrZWAIlT!~Thv!|Qzr}6kMn@y0*UlD{)7hutnViqP z!?M1DI4LDZNNka83EZyHbgw@d51A)4dqz$}SE~C4{`_Y#dk#v?q?RNB-X>nsL`F4@ z;Q-?r$LD91iw3>E2k&_qGs$pI6=>{mrPw}d#A}a8Or}X1BKn_sJA<*b8#$at9m^LN zOd9LjoE^PZ7sQWm%7Dep>Fqi;wpdH?*+SUV1D@m|^6U znByv2oRJ}Ee#;iRNL;VE`Z?R>jHyvbjqN^vV7pmxslJ-eaBq!ML)2>wpW>e+45Jtf zrdc4EaGyTd8G4TfkdP9?XwPv+{7;Hl^*-1EQu(?zCob5o{04V`fK>Z)p4kI{w5o}T z-~PRn754j>a}HRMSJ@s=dPjwwWRNU)gkgYg&v6Ht)@nzut@#^ZSa1}>%FQVAwLj|R zpm-zn=syhrJQ((*k5e;GzDb)-jAG4d`*ELzeQ!{bEN%KlmhIBK90eeAgsz}}0R{%G zub0Xmuo8ELhyts!P05OIz$xUHCq%o>jsw%sxN2o(Jo0rB+9^1!*VR@M%m!22O($$M ze>246w{|z;ftZImF#HO{E4oEps}J}}15T&aU}DhnIz&yEpLavsO7R`QV^!u$@k z*#`rN%J1x%e(A@(w+RhWm)^+vSbXNG!{n$Fi#-P29(C5~B<&U|lV%D^5KwuO)@a(r z6$iV=`X2UqB}!q+>g4s7dx^l;BAdU5F-}fHs~OtWjNk^NH{1h@&Kk)j?6Y zr9*2$h(u5juhvcgUFA5Ud(Gce5R(bRMEK<6%Q6@yHj^flTgNz!I3d$oIodF87h=xV z-Bn@xbkp)%0fj0=C8g=D#GkN!Wl$`QG1vO~>| zyVf+7?RBX6==8$na92aT-`?k5NY@l7&Cq<^pTMZEM0doA6B4R*8`L<1k(@sm9l-lE zF>sx(>ZeIOsD?n3_`7_k9O%>Y83N`A4)PqniJsBr$B;-?V&7w=gWsd$L2PeMx zL12EK`$RENiGZD(N=Qf5oQSBqV;32bpd6g<6E5R;CpX)e7B_lPDVLwXS_?p6~|dnmIqTtBDqbO?Q*TP_ZI;hMQ*8;?+E!%Oj!zpbj-b8 z!1R1lb|^gx%4MEJ;JV%aC!Ggih!3=vp1I9oHT?Q<^|&p>x_TP7{$%xsWR69BayWY7 z%TSXIfngLq1YEt-KK-~LrDsVb73 za%Gng6jB({txwoB@K((1Q+;hNwe2t!AlMrPbAhwuu<2)42dv+nefcfQiMwAs&8BA5~?&u%r< zAN0x5Pcyrig#x(Tax13>nb1ujE=*I&;oS9S$HgLWOma88?Vs#4r=!X5Zfe2`GD=<` zn5Bwi54Qw?Y08YGEKk!K5mt}h412@k9eA1i$1N&7T(2$@xTHcTdkGoyd1x$GAe`;u zsL9~4O!2mWegf&>%IX{J5geT20i37|ocNK%sv_g;RV+bEBa^jZb0&IPZplEvS^K5` z_5_S1mFom#p@ck>h=&GEv7feVWq2B}VICJ~b8_aIj8xSS$zr^5IF_sExTe z4u{0=2JE%n6HJqo;FLN$dsytYdl9M&^(@+LM~3~r_|fDU`qfpWHQ|?8CYRhS-7}Vb zM%Qj0qpf&*C=(GQ4(5!%{O3_lJF+ZSy+U!`MPYQGQynj!gRc9eoakzJf70vYzf!+0 zw3^_L`vby3ieus6gP36Ohb-5B7xZi;go?-#jj;SFfgv#N)Yqvsm!Pe zPk-f~Vj_qAqq`ZN5$}-OcL^S}5t__-r#cjhXEfP{aE=p~f{p+I(T4ceLOAQvXj$fM zBqlM$))bsin2%4$O@bLPLEG>I9yGjxiErCzf;jX2@<_HA^$D*%VOs)5)0oFDyAi5W3gD z6E@Ed40l{&_qPZ4#2bzcz`4KN{*R1p?LLE??2X_u{Rae>`kx<(K)#o#b5eVXNa#_s zO<56eyMu~i*@txPR(P$e_Hi%y!F_KeL7otYT9c3)rR&O!q{~wo)6$VIUd~^F^Vp~B zR_O~d;@t-)OHR?7y_n+lpn7tM1KD69D`bYRtsKxwNljj9qE?#eA(*%HCwOEWbq+HS z6rpK!uBizGFAo5}mJ?qQTRLUW+@T&$IHGS73o zNKNSe$MW3!S?<34q`T_-8q`_C*JdOf4*fI*LRfihy8VcYiukwzxVDIofS<^9;o76c z6;N2w#j=AJD{-NGB}4A;Ra4>5cNcsB3e*DH{e}+xL3I^UG(&1?xK)>RN|Kn(Z06f) zn9%6B_nj%^BCzzSsMTu4f$KjV`h8zw$KWdJLp{NQPzu|vYDzp}qeF+zR(TA(X|#ua zbqn7d5AT*aN&VdCwV{{j>3s^Yt3h@j(kE8!NjpYRwbndRb2!hHV~E>_3aX#W=wpey zVYq})HS0MQ4ha|Zr&1a)wAe=BFx_z2L(}C>;3+Lefa(ON^O>_${p|Tx+at`7;kcx8 z*(R7bSScbLLaWG4zTkA}r-ZN+_-wvtFyqYZShFJ~R5nJWd&h?vdH#%QQedQ(%1Dz& z`wwV>ybI6YGiSbq;kA)_8|pnF>=k^V%EfvHSnr)A39BUKI8wGE&^`+Z4#aiToyt1 zdwi0jpO$GeSFpXV1GXI$>}cv??$8ov3L4hotP)<Nmg~@!#l70 
z!x_`te@>^a5m}4zledl{N-Q#MnYY8n{=yNzk5|-v1z+gKlH6+D#1j0_-uCaAoA9Ej zZ+jUfTyk>gl^{btIK#_+e1*eG*Y@l<4Gr$Q^@!<((KU(~ct3(vfV-%Nv_*ppM&p>Z ziRRz=jXK`Vup55H7BS^;xbGiUu5@Etc2{ieWOrD= z=yz^b=wm(Jz#%Cxdmm5Hh zVF?Z~u>2vp1$Wb)^OQJXkhRrUBebxWh0!LAj{+&_oLoP2N9((uvs|&!hm-E`NoNzz zl*_c);Tn%;u5LrhU}pv<9U;ANLAY1Nsf!G~V^WJNm(YXJgzbV~JN1F+hARoWXNw34 zZJdR}7e=8m6m&+3o%FBy4R4A3BP_))t*q^3yv)+J$rZXsBL^+;EP2q1b-NH`KpbSv zp6NeTu_oyYTSo1-tgKWC72lGGIN3B>y(QIGlo179!`EmGBmP!)llZZy)oPqnPZ$B^>+L^33`s=_b--9YL+BEIyehN} zbH3O$^jTyquP?j^h<$v~d{oeM9E<9|BpNR%qdhE(9_kMh8dFJ+v)y50o(pMDi?Eci zrI4y`dYQDG?t9rSrqrc@}B(DyYjmgl0j+z1BfHg{ZB1K+)r4*>p-} zQ0sftUU>{Ww!lYP zEBlrHLuL~=KIlas(~_x&q_N%_i$@h5;)WG4C=G9Ob%U=qKGC9(Q^G39M$D-jR5uELNY&B;}Hop0o}t^pd%q>zg+?!hb!2@ zb+c6uers}B-Bp@Se^;%WAff}Hr(~&^vAIkI1~>GA5=TRH4|&0Jhhyo6Sj$0A@}gz91_g)w|Xn}&6I8xdQWO6cxd>IyfJJZeH9%?N146^I@9>h|%$ z;y7f7(jjX&dvW|5TQZXA5a(1O8TI29(Z$hOztlT@0{_?NHhy)i=8?4NtfFWIF|lE2 zHlFTSWyrWbBvRl$#0Jtq4X)f_y1Udey?rs4Nz#(FvTBNrp@r<%%RyKhOv9`bJdyJ} zG*HLQ|B3L_YC1s1vwr-|JxC#KecN|l1<|a3{kV;9%8yd-GZPFgogUvsM*&{r|73N1 z@P51$N1#C$GZfL@Y=33e6IQp8HvSgAm3uz|*?4F>B2+6c76<7a?yp1ic=Adt25^Cn zNzRPcD`F+E4=HLW5RW>{#7~wSCJ{cN+89AWJTnF4+w3;$7cfFRWFrFomvw1fDp{f* zy3A;9j1h~E`?e20s*rkbki2oCSHX>VQN71 z_atY=pMc>Ju7zq>vfxg^xGnrQE3`30sXF1!dj-g1GcM2Fz2`>sNEj4Pv&PYd+& z@UO@c?(#X2oJJ4pJ*h}Bk6sZk&Z^yHCxh+*70wmr;gbIM`MmvMT0C&;fF;%-9KA)W z=a!rz{4AnVmyZh$rJc?S4a)E*nN2onsB#Ft!D^{uw<)h;$S;Rq>0?SiudJ1>z4!iY ze7Y4Cyi*6bdDK0m5I8>n;T`eqPQC*hpBE$-uChYjKJEHdGq57 zsuc7Rk2*9gz=LG$TqJ`t{$nr$Zs0KGxHWxkvJ{dD*&ml>D}6Pzg`ILv=(U151EycPk?{Z=sY1bh%#f7}`UK~Bbqki7Y{L;rwdG`?l9cEprvv=Cc-e+WE z0?gqb9eN?#&cH10n-wLFh6-W+_p2@{^H%vI+43!va3dtOmAe0Yr^2L83AAvyUj$%y zjDmZICU-O2{k-q6>%blr6A3(m7ANNTro(aZta)pENs2g2M~9m|lO(aUmIat8bx+h1 zPNX624}=#!_Q;H-07M0xluCURc@#z#b3DqJ(9{q03je@mS@L7B!0_kv#JKHYV=Ex5 zWu@0u4K)AUhioBLobAfhB{>g5&`0G&oE9c$;q&{%i1fIYPov7FnT8%LCCjzZFCT+L zgIJ~Ir8|yu8aDgUS(9e_r*rXyJo8Ck)jIl;`HJOqMojtFQs6G*ud{wQ`C@B82{+eB$+=W?i&G^{Z0e2T;x|0F*mszH za|-8ueEm&(07)q-bsZy&)*o+A`u)~t$f=|{SRiRsLft$X$EFLFnpfQ$_cDY`6XXX0hBo9z=5}AEh%4X> zD9;eN;gqWwj>u`9;}77g-72@!pin2Dr?~mzi#v6#(ZCfkR|nYr?sw@@gD%OlQUt51 zJLaitJJs8Drb#gbS_TODP||E{5sZzDtV>)N(%>^1O+XZ%M2JW*o6a~eO#Kgg?-|zA z_pOU2fzW#ip-2Zq57kgadKIMC(0i4t0TDw9z4zXmH0e!5I!NyzpwbaUrHFzeo~Xb7 zKL7nZ`<(OP-g`dWXY(albImd4$YQR!));e)_q|X1QR^KvEKEq(9S@IA{DA!UX&^{8@GZo{5ISc#%10(1EFag{D%YXe8Gu%zv{o%l%Z zpac4);wL*9=v)a#vrdY*iy$EyO{WXGn(SUov26}Fqsi_kK8mfFVk~Q_Z2zPk=yBU!1I_M zsu_m{jVr>ve&XBJ_nFWDKrb1~(nkg*%;|X{br~>=fZkfwxdBGeXpGjh>IKjWZ0Id> zl8Bz=4(j1m+4^?4d~)p6&D!za_1GZ8hg8};$KWXz5#iiXyZhDK3y(QR#^T%c^>Ti$Dlgd-%!VYhZPcB7^vjB8$U}|WN zIVflnPZF4peRLPUpyf;OxdqprHTnxfohXTwTELkBI~u|K#6~!w$)QnM$K56)-v;ww z5o#y=3@{>-*yf90N@kK+&{3V*5>Q`_s9%qw7&78VU(@`YRvWzH>HY-(=JjZ&qxP6* z9V1487e1sFI%^qe$^e&t&4@LV_J39^Z{G_(JQKrwOtu}qGfkhMZSsw+rp?qwI42yu z8e)m73yb2?$tgmE;WhvZViYghx2*qg^`+?9^cA0(32QSrP9n5|qQghn3!wtEo_fF= zosz)iJ@W2cKW-R=lQXPdKW=8AUqGjxXa8b0BpZ!G1)JG;m-VhKq(onUmwjxO(*;dT z;z$W(FnE+hASt=Y%wBjc=MScHeHK?yr)F_ztDOI4Q@EiIp<~_5Xtt=v9CVK>jYCHO zR(F8M+gvnQPxwG8JfOe8v(LW9nKzHBO+1&EozZqx16g^e%9@E%!I5K1VF%dIbTdCT zX@rT&gvPceoQ$a6jUW8et4JAUHe64A`9>GOIzuNX6i0#kmdy((?5w*pyaKldZjsWR z?3w+@4~fB{G-%YC1pQ6cf_honsF(eb}{t2!KQd0nCZ`^%^W`)g1XHb%y(3YUeL>byb zd6nx21hl&NI}(^u_c?y>Vujt(qAT7v6g&Za6Idr@XozUAEFxl00CeSM$RYidB9;d* zZ_Q*3%*yF>N%%d(bbNrsP>iZLyNP}QH=W7;KHY|0nc!F20N6c>u@@*N*YRshg-Eg)$b)PmjClwV0a1kKN;8IwD7mTxY27!s0&sDsaucAfLg zdYg#w?~}SnV5p4vg-Mj|!BFIb7&x|(MymAD4GAAS6r z4yl1;_nW-Ry?+o0uxahTk;$hwxEUzcYPN4_@Kmq7jn*`(K8hHbwUN0D502Cs!M7!V6ot~@(lo6=ov zX;ep93P)K@hIlddK_sruO>8-xsZeRL@(}4EB}7N6{LQX5Bs~I1&rm6u&iB`v 
z_O9aP!4)i4+8pL_5|ea_US`E<6V;3Q4}XYXW~^sYHV{>ArL9avE%JD)^bz5dNGg;2 zZ#d4>`LIg@>hW9ww@omKxwXT@D(gTUJk<+Dv`VUV8xJv2--*j3WJ7j(Ze|(KNzyk?0 z8DitBBa7c4^egAFg?~B6rd-j>OS`548XK$l;xAFW`>y+Xm@hOv+<{$T`5bT_gStz> zXuc)Jl4hK@4}Jj_K4KA;*02ak?bk&Ky~+%XeF?!FV8u1f_!PODan$LMydiZ6)%d>j zlf!>TsEPqq<(WX1P^hE~_+H%8YKcW{*Uu-%T?##N5sAd9#43sa9sG!oo8@2n;K2KT z?<*Vd^?TCbUx2^A>-$xLDhtW~w6^m8Po)S5QNj7O?(+xtS^V-}p8wl;7(O`N_Fpv* zK3b1oc=#(02P#?{Aa-br8clwFw)eGaJolq@Cbm}(O+<#j@sn0?X4 z{_9lZ?vec^X%}mi{mIxj0{4FYL2&qIwF;79>6d#eY$q>j(dojkv~iaNSv}0n{3A$~ z*u>pdkkoWVKZfh}iTFNcBy*B>J{eyzlcWv#qkCsG?|ZwTzW09toc`Rle<@y9TEm1k z-AQ|y1r(|GZirX`boz>o&IagpynAtPgCL9D!94Z@1d%tjGRn_u7!m|(IC+o+@Hh^g z+F_lYGc6{zgv&c$|BWIRi}`~W_j2e@XyPwLuJqSNMfBOrzpmati}18JD-jllPqk>E z%8O94FGHhYwX3J|#YV!=m3MTj}>$ z`~CO)+4KKy{_kt@|6oFZ`J$?J^|FY3##9>seeBG1b+-MMz!-= zD92z1!Qn+Be8KvzSXs-$2fQ6;X2_iSAcA{@VKjY%BF<2@05M!m)u{@!IgDS2B?3i$ z9&^)>Bu8-}TO)!%{3WV4qwKes_j!2dl zXX=PiLp2lDeBtU^4CA#$p*@6C%!dYq2y$kf$;TyCWNMR%1Y0Ajl@@f2#K9Tx^5NCt zU5n>hQ4DcZHE)@ul#T8R=1i-ro4w_8Q(wM@i>vhuHS>b2})P{v`i#HfETuplfUh7LPX+@?ysc@z9XMyh>R`a$Q)mkF6m{~X@Huh4tWW2%+@RT{HX zpzk9Ca&N6ZDSJl}0<-${07j9}PdUXmm95Bq#LXq;b4jI6GCoBV2U69MnrBfVs*{N} z6`ojsNZe=zzYlRc5hm6UttJzh?&9KPKP$KU1yDZ}edz|6y%MXu9qrZT1TmXL9w);NA%)%gGP~9%R+eVk`h2f6f7p#4OHJKnV%TxMSu#gCqQ8VcfY49n zSngw`Q4+9=l^$kBH`IA9%fk7^i{%-aJ`?(5G5d&M?X0&eKR?Vl`Ihyu;L0onGm!Ry z2F;Pbad}LvR%}v^+$LpvP$YpLDw0|LkcUis%C`M_(ji+(mvT@@%KPn~#$pH}4li7Z(|tY<#A2TS}Rxg!o7b1ayevi3>Wt>I+enR z0*)$-FnP-@ZoCIfzW`xXQ$K3yovc>u3llhRi+v4K=kYx7mHED{j}OO!q!AK--lS zP{-ccAyN_J zl+c`BeW_SGv~zSR29?%Le%bobGe#Co%1hCaAz9%bB$y)Rkv+c=;qDNYl_a|2N@f{c zO8cOrFOxj+L>2jD;d|UsS6}iq>_g)BLf!mj6DIP%&9$Gf`7IP5=cgG2{387GG?KQ4 zQi3jlSWd`ZaiX*cI;W^{S-Xv!FMeFsyM-YY7p8v!kg7{}usE+Ww|@bkmB-(11p9oB z@!8CCD=qn85OS{X zPJ|F>R#*avalq)CV9FiJP4s9;F&f~==uy-0lETJ6J4k8!2Zs5Sux+mn$>CBZ z?-$GD7g0cZNzsZ@HYLpV2xLMk#@%L7xtg(6_R~x@t%+s0+mo|zuwl2vd@DYOWZhWp zt%qD~YK;8X6P-K~J_ujDjn)Sy3R%2H&f_@tFR*Pt~Tp{6h8f{;krredZzmC^KV%w|Rtyouz3wz>4uP zW-+?D!B2Q~P#=6k4Gfe^Okf1zGMR9g^)Fy-K)PAh$W)agzQn;cV3}@BjR-t~&{z<*|^4e;wjDpNShicLqO6UG2Wp z2NN9QsJ6+LPz>4_I7SpeE+*vQeA7%^PMn<|Znjyz6?-M=B1SqA8?f4&1(FcK78Tam z0kwiCJteQ2t}GlhH3f}p75Z-5#H`&ts@mr#r{WMd6&b_zcp;Er`>ckRtYQE-xEa?r zHqieR_m!q6J0&T&tXMQ+cz(K8kq&I+(Ldfcj!ZN-%?+q_Y< z!YQf}VpP6BS2S+euhV`WWR0j}qv6nuZO(|}KXEj8BjEo?D&paWyFp5?PlH~I?L*gq zID+;_S>dsVp*eE6FOl50+4~?SXh$ zBCEksm@xQ#$oHroGJY+|O_8jv=>>pMTb>DuJtbP(qt!fVx_|w!Yg;NJC7;C1Fub9^ zluJ8?a+Hh-8cOq;6h;!a)psRe4zjQ`W+YE{(aET9db$0FW$=6A@1Wy@7q%}W!c|+< zSY4_=CID^}vau_LtxLudwFU{FxL&P=c8|Wk$wkk?;vGYVI(YAO{tK}DZ{iPc+ugYH zpCSaZ4Oag-AZxdL5veM`Dq!UtsRO;?;{GlF`8Vl@pB{W(cMqmLId$)oEMOf2Oxoys z@V$h)Hw*m?{+(%my2T9YXlH-z03!)AcwVb^qwr>8(-=eXYpi+D6Kp#h2zA4NPsO6ar#z3byGmSt_8B8h zp6!xUtd__aTHBx+#CT$EtuOa8peP~lVv&HYI$*V&#sJ{15N<%aB>kbMdf<$(daJr( z^{RM$nj+pd2yo;^y*2{)DE>?XnqG6^bsTD&OBsAzjH3_>epZOcVy2N;$HDXXu9##< z1ei1}sC^8lbxn{n&|Z2-CC9z43m;?JeQpckD3KE{eMK7k?`u2>%#FpYUQ;nELMSy@X0(saanIY`T@u??j~l-HyhsUR$;50nQ9yDZ6nc2e}uG zAZ~X|KU>BZbhPcl=PuUe7w;PS6P)`M{fLEe0A9x3t~jdjdNZV?3c@D;gW_}J{_m(~g z<%nDCE5B#)Ly^xhU7OG3v^wgJJT;7WgJ|f5!x^yRIoW;6=jDSjcRh)FYsI|J!}YYh zWBKU#qeHi}>2##hHF<(GnxWtGilgUd&p!Y$OBfpBdf}$3{{~f=1p~zpo)_K=5s)7GBS;z@Xrm^THx0i z>9=lS3 zGKr%G%RM};$Ywox6-4I-4k?T&{DATs&O`Wk<5{?0qNkQ=Td7xyM~}-rt6NF4G6XqU z_&ck2cbIgFm0)M=?;hM3?1+ZzrC;pBGmC<%XrFqn==KvwEp~hQZQ?jmI+!%njV{g) zBN1i30B0P#`ldXHFS<=1A`fHs3Uzi~eWWtbnpU34jZ16CKbfUX1QL~xiSDtla$l?> zb}H(P3O5EIG^$Zs<~im?`w+LMw3^P&#?J_??O_?Nqb8lSLK(2o;k^UxX6CnaF_a?u zWW;dvy~+C_9>@K%{iNjwHKDaYGnxf!)|s$@+v&Z8S+zkfu`;)Mc|nVs2;qtT2ObS+ zNl))@TKB%qlu;EH(aK&g^w+p7+1@p^6oT+hhf8l3A{2NpB`9H@Gs6i}3A`>mgZagQ 
zTQa5btQeYnfLK%g(WS;@HG#}9WcAJ*G>Le)grMP)reiyHS6|L6#_7jCxY?kVNp*Ih zk3Qr)e_IusKZ%8dNQaQf*qcE@X+lp2S@q2rb_H7*H|Z?cJTp~v^K>GT%vNg%+K!4B zn#?`H&|d9ZnrQx(X*~g5Mcvr}M9;1@TeImK+@ZZWmH;!suMO2;;_bUs3vp>#z#89- z3gceSWw1+8T=!5uTNkOM7H5AA7_#j)Cqf*gCO9u76bUm+BgFCUA1AY68g`Ls$i1z~ z&(WcTgc{l&ZaOhUiUA$`6634d#$fu{!l3Ks29xv%A(=fgkz8WkVii=mjSH_UI~@aF z3cG@Vl+L`#X4qzwsr6z=Q!74W{5$7(d!UifJ(j`psMA=S;ywesoSJ>2cc3C`6kfc; zg0_N}wlOGM2)vOEu_s*cVx@md<}XG&=xXsu&NGClW8ry!b7#!ph#^02AIr}Oz#}CZ zy`-ply7C&+ifX6l{XC#b_E6TYu}O(V?KmQ()e86)u2sb&?(kA=6Cez)e@?>LcdQA%&Ai^JvSd0BV?O zyQ_WdSfA|J6Dq#4p|<67a}^W!N(6(*IH_|>Q%xtW{hlW(p711<@lJxfnx=^9a~OF> z>9fEG1OQn5V&Bl%KE1AVQ4AFEa_9MPE~ADIbXW3;q^o7DRi!wVy@#ranLCQ$saiwc z>aAyqja`C{R4+(B!@rb!l+u>SLPBM|?AboVj@o2ey1-dFEU(4whs5=kM`=$0gj~J) ziZOW-0_@?lmH`V?l2u~7E$WKNj;tLP-Uq6 ziB@gNTGZwMnTcIi=5C`(649KGeD3}?=G(MG`&v(i8mzl(CuGQ}QwH*N33P9pt!vny zhQW7ClX1`SSML|D^8^F~6}C(FR4yz#HM^to*h#}|?fO^&c&eB6fA)i=hGc#LFzgJV z#Z8n&E)T)CsB}Xt^|K$(L%TB!h2bZU5N*Vi}2yb&XyUG-d;vWC`2 zk0lvUBzET{H4hg!p(@(@G4aBCY$oITFTkxo?-ypV^0s5bnl}hB;7J~$N^0Ec3tHeN zZ~b^z6c=UmhV^3Tc~Bfpo^N@hn_?kG0<$!#E9PWlylX?mUkV?rgTJ_U94FDn5f6Eb z>3o7mU;!yH8dG3op-iYV%!QsbEX$7yK9f2X70&nOZ-kN@DH-2Qq6iHmMjX)JDqy7O za?$+qWcJ(dhnL+FUg9Qkv0Nk5u&t!^FF=vp4zucXp>1HBVKv=47AF1A-eLMQyw}sZ z!3jKus9&f}B>N8#NK76P3t&Ycp+-eH85-&+x&9 zp@ii$Nhb2>{e11koN@SV1;^9b!{@>G%C&)npOmv&1?>n$hwe~lXh@aSUZi{9@XnHF zQ`m5G6>JR$$0+nNX|=tBs+V;>^rrUkZncF_S9e=MRBMOWD$AtO*z-!<$H5$O0G<0H z$4$*%#H_7+vlNvC#W)4HycDMCz#NqVf2M|b> zbb!*4ma@mI+$;UW9!G5$&%7yeFo)$HP8#>QB zV79{tgKOB!pnK*#=ClNdm2SAiUc;7C4DAJ#*G(oV%ls$NgU6pSiiZG}Sk2-z+$+>I zfX2F|?JX_%aU*Sxyu2rw5IYIv%jQM1I1X$SAC96Ro7S>n$wvtwZ z6bmddhQ^B};c7cQZd-WY2A1z%$?S(%QH92ojD$pPadj5jNWkI*>+zv!FHA{&l*jZY zPdgfqk)>V`2?ykdkj_vGm`^fQ(ux6_Fe5Ieg~o>dMq3Tx;E*#dP);Fb6|NyaoN}(( z`ynXwDR|ws7(52z-Hgappm}m0JkTRU)}T(uZ}4bLV8 zYI`s(o}~iR!&%;Q6sXD=RACkAWw#V##UzeFj)hjp_fhniaw%X#%0auhjEKHG-M+L2 z4#}>q!tWnKulsnf35+S)Pu@@IrD?$$GWX<8+c1E&>9Fb+K#1tonHPy zQVA~;pXydooFQ;$g9;39v6l-!VzuQ2Opfx;>SiG08QIQLF@n0-f)bR(%F#=QH25EQ z&Uh#>BY>WD4tBORMYXXEMVow?&{x(@RJ$wuZN`J)X~jhfO!LBu3?7Q3`{0sZ^E4(x z(-SU1`YIDAfKLLz>%bD}ETo`+ZGd6jA!e|X&qCk(JA>B!L-=l87{&TzNVl*kxyve! zBsv}GSVk58$p6DLh%4`=(sJoZQf-!T>)|U;fuGp;xSCa4?)&Z!gI5Z2?Gm+gr9iYo z@~gzPI&}-vehDAfS_(7JRXR)ZauuXdje1=sf?d)CTW)dD{ttMs#J#I zYGE>_oqQ6-E)E);W&M!@U;t)DbPyXG89XyAh=kd@p+>woiS|NV64q*|q*61JpkSIr zXh^4m;iPFJUvgJ57`tBnanyeSOqlLqUUm7U5bm_5GI`>~#XS+ildNU9fXY_mto5>r zU;vJ_(gg6ANcm6L$T!60oq*3`*I0-@%%gD^ z9xkyAtLcLEIjEIZRbPQ@0q!FmFDU~DKZR_iesGH|H|qua>yZ$xhKw;kas8MBhDR$9 zdQe6VXk1TkLAe{Cql6mz4UgH62$1vh>0ceDwCqx92p1cEWr9fp8gE%AYQE*KunuK- zgIj}nrzv=;1}Q6l)U-Q|IuLK3Jyt?PYUJ+PvTgV(%ITf-SpDUpK4<>@y#4&WFYF>w zS7oRhQIEE52bFEJCLCIY4Q4tIkwsZJJVZ6e-V-k;y($ z-DY|(M#5cRk+~81n==Q~eNgO9fWj&n!fpve#tJx?0(x7eZ#8Yd4x!#CW@Zh2;TKCP zClJGp%ctAnYnwq+g0Rqh8u39Awaz4(tuU1!l;ika^Rqs$?4~?T1}V-XGa`pLnGuH7 zSYzsJT7jD&p`5Huc%lwYGG!kNj!A}~yO+NRWy!U!;?y+EHJVyBX=BkZtd};h2!<14 zQWCRq!)|wcRX7oHs&aMxc~M2tiZ9ThVEem-w}a3 zV3n)aNE0rwiCMih#$#}nK!Jq zqfUjmd*t#vp5MFF>(YPCI`9+9Eei_>SGrg z*|DZJ+L#tuu5}bTl6WhZ?zIiNB)tDi9((P*+SYH5g6LpebC3{JBGUf3qHvKhLx?i! 
z;p01ne__%i8jP8~gld~o2Q(<9QO^K(v5Bf>)1x9O2p_mM4PO7V&eO4UUY&tP$Mno> zY!0hlX88Kqo(8s8$$TZL3^wsp1MW>u!#+`9+VeXNm44l>Pp+TZmDv#6S(Q6(xHcfA zTqB+5^t7txk63x+F-YW(oLaMyCMd>rGU&Gwk048;ZGwz>Y8R0Z6n%psO^-ZycEkDk z@#zn9C^0e9i=a8I8s5&++C~R!CgaOj;pV;CO1g0YFKXO&u_tuoc_E}EFLYGHqv@~) z8x)UEr$jLSwuluHI{2C88?M(WRsrn2?{?*%-%<0~iStj(($=UKOPVDw=JdB6Zf-bZ z?MJ_Vvu=DjHuvrHENkl$+n)BsR=a@|G%K{fbNP?QM6L!~;ikZH+QM zQ3}o{i71v=n$uP^Pt~72??c)sj{R2o_=`{O^lzK0M}&opfBZ@D{P?{u8S@K3|3QUV zm~&%W7fo7Ye7-}MNgr)HlEacRC5pcizhPP|3Cr7ktoa1RS1apOL+f8^8Zln(#i{`N zLey@!buq^W|2nq4Vu#1e)HRnZ4aRxnLg3s;Ww1I2>1;ajP1&_u@Xj-T@j_wWOPC(} z6kv}!?;$ygJp;bMdx!+7m#0C( zsw7vY;p0lGG>Lzn;ABbx$T2M90iXbbJM0*1Un&M(`*X)7$OlWDg$0;@ImZJ!c+MO} z1V3%WNi~8Frg~?_FsiSwqr^}p99XeRgJn$nQ{#C@=@)=4lDJD~dvf;b&ApS|PhH*b z-im4GUCTbY?(zIKKEAZCehgg`tB$Rf!n1XvX!4d z6XlTKgz%o@{R}~G%X!$P!f)Q}pq&tsds3^~>9*_lL`-nJGP6c|=vij*<#RzxR}04R z8JYT5Wrt5P$KQ&DzEN(Et}JVZF(-12JQFcVCtwbohigD&Fmm~;_*(c1NiF=1n z5{3L1_pVP${Ps`I-kqO4?6I_Lm%2MiD;%4NGuXSbG+1jb2g$RE6X*+xp#=^pex5fV zS;Rmgs#ovwF3;a(->gy0TObq~p%hJpmW0kn=V?9&GVXzhOY+%guS>wVV&s z&sUMtW+%)#VZN8>vpn|x+SwxWo{&*~td)@B*#l(vLf5U#q5z9jbu65ZP+C2xVdH5; z23;b4=tNR4lVMbTsHN`Imk^x8UL@i%34L^0jj0SL+jGURjX1S(SRaSy?Ie~fw7T`z z8EE!%gicEZDRKt6PtDcR%=+4NozIY2#0C zQPN}TEM;#`o4htLAz5^we{K)_2&y--5exW2D#m;}(Q1!9aLAvCP?$%07jLKfeQE+& zw>^K5AK*6pGnA;MZ%|s@S+x9Mi*f>w)>ktdEV`qy_5*Ei&1x0Os;HL7p5&Z1ABSZy z`^}pM^K>Q}wQ>uO#`39&Vrk5vS(2^);#Bip*$h`Z?fS#oCh9({(}DUe@kcPi{=TaE z9|ay+cznh4lKfu(bQ@wn;ZVC4_JeQsbDhHu+I|cAi2%zddU+mu_ca| z&CJcLMB?P~P~Mc?q$^c1`xdldpHh{kvaNpgxCN=ixs}P6f9$jdVBf*@|5kpkoriJ! zQ)%>nymiMymSX!-LI51oyQz!7Nq1u&eeCJ?n7Ghox(5sE}A zXNiPGZHSc7bdb;-`#va|^o_b1VnRpSzx)=ufrQzN{S{!9ui)}g#-bTvo7cEsBUs{%q1aEXkP7>J z$e3(Sw_7guKvAIj?xozy#IsxX)O4UL#;{4gi=wSgU>8Sxc@qd~vu} zJ?bBmJVGeOssH{b-kgWvh+b$Y+N6}yetu|w8%KzJoPW!{Zc#5Y!BV2FlV!XN^!*_0 z!`^h}og)(W3|oh%cn$W+N4F^AbP4BXnep|6cbIXW zYWe4_gXD=R8&4=+sB0QbT>shDy`~1MTk!$v9JFkn)lyJ|a><)4$R#mznq&!q9KE1XgacHPTa5I>CZydK>o1v-cvVp_%^&J#)%q#m=F_+TXJx)!xW#n_qiFP3a zl)pLl{)5Z@`3Kso+s~22&O_OXtRWwb!^^BoGPVM{?uTE6$Mn7=DpbuGM*P5;18o@S zUCahKB#0{qDcnlcVCLi`vOLmZ__NHg=u+mAv_L8-`^Le*ZUj=A7J)cM4Hqk!q z)Hd?+XIr!gpx|MwA?~Vzj2;8t5;Ai$tfqOOgwjk0CyDK|YOE=h7jh*dA`Q5R{2fQ@ z=UVv6mnGjuL4`Sd53VN{Mh44JF0 z*Qbm1(X=8+FRMLU4pGKHIRZ+^uJK-roym!IxI^gWx4)l*<0L2s3Wbj3v{n^?%hFg%3B|NAVI3sa- zyjS<7G<=JlBa7lo%Qr`9$Bs<$vU{Mnt{jK8n`TQceZ>GyV$ky3ZKm70{6+05=3Dop zd$=#+F8F}xZ73|jz!{$VPgDBb)|Y?2=Wf}*)O+R|q;+y@@Evu~UHgxS+uRp_PO?It z+$a@LMGF4WlepnRL(7T2S&{ zOI+`#)$F$_pFwZxn+R7~jQ^-{^zM##R?s{gILLZ5`PWvh6Cz!``X`E?%q}fhCW)I4 zb`R$p-FDvxdyZ%3WU8EgA?Isyk$SfqcL2eOlz`FFT^0~#f7#GbvQ0z8S0u3$5lZe{ zG0D8Bv=V*ITp%~wneUC|{}sA>^|z*e>T@j~%fMRe1dY;^1d_;i>5S+`TH}~yB#)=( zCa({f%sRVzAY;e=mQVM$wO!i&v4-Q%UW^A44f>z1ydIoJD51T?MI$^=V`knT090?a z05}MfZ-7Qnw-?STyZ=z}bFU?l*9UTJ+Fwh$ykhB(czH-L4A#f-bt=lHvPOn5)X}kR zg4bry(60WlCwqfYO@u$w!=AEb_#Q&;pfUZOj%A)uUI{{{@7`R&NhV@FgYy`}md!}A z&IFod(7SB@1&Fv+eH__MY{I)pV|G(i{(XPG5*mID1BK%0i`<3rjn0nUsHav&-^`Up z!q}zg8R|GAy8}#>ORF|E#7o>LUWCrj3{IzeId;em-7G??k5n$#wOJEtH(v5zk*@N+ z(j}!%t|M9z&zBtT( zA{)b;!<73$O#8Y-PvvWUI^&z19>KN=RCd>&9<5*z_ANNc_?sECroMdSeh;;zc-=t| z_>NIHYv%ZU;SPNBq4HHg*PYy_*D^1bjdM@VRW^%a#j~J|xrIg*OQBrxg?-acmM7m{ zqWIGu^uK_^+-5R)*JtlglZ^z9(1@o5L{h1V$Vx`cCLmV4C8{M z6eqmD7ooIYf6s3|mv%aQFugz`M`Qef;sZ%j<;s7weJre+nW5eJ{_p+vCD(L{@TMQm zVP!rRK3%(`-xgd>A!h9OZ^xDHKb021k);QJRsZRxv%wpdvWqKo)8pT@^|lcPS=s+f z4^~c*eShOI$1O2Z#RR?Xj!?JU2;a8g5_KLEQ6$&IH#Jft+qcqBqm?$!hwhL>{}~)B z^j9NqQu}Qy^?z#%VJ&^slM<%cPWW$)(ekbZ_saflL`w z#eVc zz(#I~i=H~|SVc5)p%;g1TY?#g+VEm^-&2@~11gU-Y!d^k;3Em}s!L%U4{Yv6wm}a} zJRb*nmf@ZHjy*dft}heKXAHF#siH4;Nc0ckN#y9S^QvUn^)7&QogBmztKa4rJ|u~V 
zpXhWwG!k+z)#}WYhNZP4=)zG}RCC`*hMi30UcO9&=*(r(+)~nUZV_a&Xx8u);AZ3v zEtg=;dCRrwo4N(*wHEfth*~7wJ2EDBesu08&*X5jsWS>x+et=`z3FUk=+UJ)u0((H zFheg6RD4H-t}L3cUTE`E9luiFFnS=?4pt>6o+7m4p^i!}d7BammxyGG0k{{`LNl%I zksM6yLlDh^#d8H-I`GVd%D;iE|H9+iJyT@u3>7x6iNUw-eO0wbtqlp(gbIy5Ii=n9 z(;eUlCSqefmiZErQ)~Ztn)&Zr$+5#h3F75d_*Wb{V-t^)E)H8|`CupYw5*KT@A^Uk z1y1J~lOk=RR2Fn|LPH8l*A9j0Do0>CuEC4~8O8JR=Cn~7l+i4Gwj*JQW)_5tDZxaJ zM&rN+b+UK;ls4D!iwZ-ZE^^yJD_z3iwJt7qM|jqfZ;JcVri!s#Ucc3rT;4z5^FDR3 z*K6yW{%Llv5}@nDcGv$SWh^K5Pp8leUBP=u>c6`5GQ+e9{^_EzQlBFiY!lu$RN`LM z%UKP~X(lBapcuNFh?6*2DoJSVeQ#q!J60w2uc*F*s>OQ&N2~$Zv5R{o++Zz!f9%tD z(0JGQuhAk(IX4!rzx}CpevGYWlAE(hDpLOb2F4s6CS7y*(0Jj~v#Kq4yn)etMDVQ! z`dj0*blFV{ys#+=pO=3V1FwUluNBGY{H#WDOE0?$x5wZ32L!JTNmW$cD*B!;xfQA( zE#U{Xe$VMD>}vM>#z`V&ou-j_@w9bDCzdY$AGZ*^93i%tP&_0AkD}tsB(&Ovi^Q3D z0dU1&CHh47QrTe}FD(ThnB7Kdpx=MEi~IgY(4&GLB#Vt_*=Ot~tjYh{FGgjSOjHOB za-pp6lqqH)6By**0oQ6g;p8#t*nm5`J1+*Q+3O$fHT0dbXan^yUgpi;C**$$88|Hw zmHxvgb@h|_DuPMy{maB#s%QRpMl!n&Pk-7^y;9GpyU@-%r7nh-7X;mYMOrfXbz;GO zrpc7gnH(VZC3=D3b_1?$u`z<6i;Nce?)SXR8IQM$BD-bXviU;}Kmwvn>E%d)HseOH z_5chk!kLN_c2&t#Yx}SWE-fg@y)SWkjvMX8Xd7=d0E{QUy?%c7;P#WOC`UPC<&<5> zepjt1=JVT+kdSO2GG0m-4AVLsFl3uR`QTUq)W%wDQ%!GQ&5lNn+jKz6Gazp&Eeb&P za~qNWn=41^hLlu{u`O|V{6wc{Pd;(tC`YMYtO~T+uwr&f>sqjR!TbKz0_F=|!R_sP zw_KJdzN%LK#sE7gO#SNsy*@`@gXFr952L&c{?If1W=8rC_M$&LN&l}MQG^Q7bg1s& zUSp9qv3%_R#hiRXYG)AC-0hq%xnXriX*pc05vD9(eS98m<9(BO`fK!>8i00l1JlBC z6Z=T@{6w%Ftu#bPY}Rd4nq;!J+MKe%WXzhNXjyXd4s`Ja#7$u0AbfXmF&gI3IdzNU zRxvmR&NCf+!2s|&eRw74hy+?OoATRS%z+FJ>IbNG32`x6_35`ZY< zX=O*<%IFEIuaZl36F=m0VQSvCp*UtJGB8+)29%-d{j=l?42ZAwv)9a5~ zq78HO;NP|3!DBm)`762y@pr=hlsBW3Z~^P#$Tu7@m-Xwe>)CdYj&bRB2{TObX~TMU zDO))!LR0c=TH6R<$Y@fO{Xe|C;ywYTzcVs-ox{n-dpczY1!*ANpLi(w=?bi;w zpM?E(DAQM3BbvcKP(O|7dg-J+-bJ2ih$Y;mr^Iov5lUM^j2A^8uPC z$tx5Hm{G&X$p0b;IhNdCeawp5KDk~I(9uCj6B*)&+p6ps8{SWkXI#(Jym%sOc~Y6% z@?q~0$Nc!4meViOul@ox9)0igv%4mKu2R_dXP=<&37H5Hf4hp@EulWVHsDqtPmo#1 z-Si++k188_p4#gF(cX6lMfEKI?gG2y49kK@l$@5lAUS8rStRF-0T6M?Ip?GxIcEVy zV97`j5KvHb$w`r<0;0Yp_`Tly4Zm0KRlTZLb^qCOW_o7&GiT4tnd$E7o=MEW>+-X3 z;_=L0ybNc#@kqwTjGBX2ZuOBs)?OpHnWxI&whiTUJrB)GkHFP;-$$Q)%eng%aNLS- zS!Kf%r&OI@FK}*z2CYLc+Z%GfY ze4_DtdM-+M_WQo+MHQv;=R+3%Zjp5R87lTynniqME6&QI$@P5}^lh4Dw)n;bL*CYc z!*^|Hw0FW>s=c;(m5BqN54sAN0>6A*k|6o5ZxS z?`dFjc=7Namxys&0u8~1%QUckoCF?zJjUdPo6!_C+Qmacnyi96Yfy8~8^)UEpnaf~ zu{7C_TKPhlrQON*!fU9r(*!8%6QJzi>j{X z6f1UnxE8<3c!#V+Cw(gSY!DG&k{5lQf!Z-pcj^w0sW%bWW4s=M39A0ebCAb2 zeE#M8Tdl!RoFQcdmTtOi0L~6pzrG_*hN*Mn@^#y#P`+L+dUH>y?c17cdM6!!GitYw zyM!}7f2-Tf`w0+>66ii!cy_mI=j6N7bsN>4zRfrrM_j!eV|++1uHL%KoT0nRdlFhy z)X7uDHDf6naaIg*4}5;|(Chvm$88fXwXWku$h2bf#xffFdfqE8E1!OUX~MV^9ZIu0 zP>N}p(vhtV?#4X`Agv%&Gih1V{d8c z*FMtR$NpQj%)Q;N%CDB%G`@VGQ0HR09WhfJR90!pjREu~u{U`hJ6C~@(eL4RU%`Hw zpuWR!LN!}H*_QXIg5L-L+N3@c6in0BdN8ZVOA+mN>5?Oqj{*6AJnaev#4%a41wqG@ zn?{&Kz$22-$Lj;K#bENTX-SDkBw+#G8|!oqjwJM|dM>Kx zIU|l1H##1ym3c;_TfG-e!>+t$}SX8fFmc49}S10xsr5i*E@6nv5Fo1(P@3H z9-joV0N*z6+e|u&WV^L{sT%Vk4?og_kiP>!=L7QbK3prQ3vTvxB44>Eq%$)^*96sz` z@e;dVlN-5?$5QQBi>h6!n*KNW%_g08luj+u=DwDb}BSIpvB#bR~ zh>_i^m`wF*^uR<<%($@@@oO1vTdN=Z|6$!7W9O!J%c@phRvZgM%%rb|Z>rOj0qDd> z9^Ju5O!rHtw|_e?;DxRKsasWxtfTX|!aq-@1%$b zb%o^tpY7|gJZ0CJ=kvr)0=|9^AU(Mmpp_yrGE*DOM&G(1WzN#|H0DvZ_|`|1N`_>g zCw9AXa4i+X7pI{B@wXdN7HhRR^;_SqC5LZt6&@AbzAo|1^fphxGvd)%?|t3%cT{8@ zMmL;1X{dUTCC`?rHH{b+>o#-guDBkWWv^}#+Qg%M)D56zZx2`YzW(L*;eNN|AH6|g zSdJtOzAgz>R&Vd#Va23{E2}gFI91x!wcQ#mW6n3b4WX_LhS^7V#|LsS)ge zp<2d&h5Qj!K6NE$L$|1fG(%T}xg{6i#q^8!J=J3=E8G4@PtiIarGt0t7~ACdYp+6G z*;5rStJ7*wS$V=Ho~j}>>?3vVuv^Ehzo*>5uNk9uYy?YO`x^rMT<<=DqgnZ>JseXLlOI(Et 
zo7RdXtXd%9UXx6Gl;1l(71s!akk<8h-19o?6f(^vq!;yDieuBVmt2`91aTii+}7!y z+ZkajW)mAh(&COVFV1|-9n<|!wX zXmtT+-8s}8+;|T2rH2w;@R#h-^ZsZ)W0$OS?L^)DI9k-f%UvI7{;?PwY2xO5C%|y<_gNp4~lh;7XvqPM!Shobbfzld%i!yx@5~{+n0&-_u?c zw3&ee*E5Y62lze_Ij97f8|>O|WNZBdNPIylF75RqRn6lE-*@k-CQp-(IOY7S9c=)~Yu(%b zO6xUs6l)yjSM6G<<3Mdpp^1XVH%aq}Xi`l^J|h?vEUXvnjiN_Wj?;|Y_?n9C67RN! zGE zdI_FOoUawO@{eoFQ(0BanFap-!L>c8=hH_&BYWKxDGsDv%wFzZ z@_gfcK?-N46U3}svRR)f>y9l0kg~52hU0WToQmtypE0dm}nE z>S;alF_5_{*Pqh(36HfubHDkJ&*#-B+RCK&q`KN8bs!eFdhc8167)~xMc7Xsj}~RYB2S zkkix3*UI}C=FS_5yR9HL^Kh<{-7k7j^O~OO&~`giL8=PcfC7s*wEC;(_l{%W-L0=* zp|bOAj_etlgrv(xAF$Qz>EkK#^Vjh2RtvlgA@Y6S#G_lx%WY;PUbZyMx;&->zy1n8 z6{Nwc#p~)4^{IX{fYq_S7~oqr-bym_I>Z#*HsvbAiX5gWowX)`6cO0MQ}El2tgE(G z5GNIMPFh9Y6?z{U`l0UYSYKfbMzq)p+N&m#G*~rPl%Mh-Wq@Ym6`|~xOecqfciLwa z0v0N~xKJ`*LX|<3M6T}K5c@4a=~Y-J^&kZt>?gMxChJ~?)ihy5I|Cta!wnpNJ+}K+ z!^>cQr_wQ?MB9aXFSfonB%5tnkvaVN#vSXu1tTgYdO;SAaOQ4-by>&@aOx2PNMW37 z!vdW=6a(8!bk?5DEdRL(Eu8pf*)R&10x=)^xYs-ANkYFlF=h7AmB zm(=wN*aUbIVJA>X(g@=ri22O^*e?9_=1ew|%aBE#6=hlL>+i`I)f21DIqnr!X1HU} zB2a8-?A^OpL{OX%f^ej}jQRXSwpQs;Kn9gpx4AZtU1IsjmgpY8y&UZ`W2Lf6i?Ehn zfI0??UoCcV7a@$Sn!;cJn9WfE##``We`Ss=03T}+LDGglSrU;wY5!6d%vr)t_3?@n zAcE)$uel}*b1dkv&{$zvyCdM?A3D&(dmhrL&CbYT&0->~jhIj`RP&e^c1d_*M7V!2 zWq1oFV~6>w1fkgQg9SbmwOLP!YFHI-=B(=$wr6oSpLSq|0bU4qxg9@V zuV}y+SM)(wZ8yEni-A*V?CLT#JEZEV>sHTn*7LTuK3^$&3RXvC4tOD)PNz$Pm@3XG zGn;G?dN5$V2-F=WV=6`*qzA;*XoI*V5vTwnUU#vIu9PP_wpgD6exLEmX?RXDYO_X<+4uabCODt9`@}PNpCrqWIn_P#W2jh+ zP6MYAy{b+1#5Gmtr=IsB2=1P79dFnbXZr;U-9D%qCGjAIiW+4(x`IwhE=SP!X{da3 z`JfNkLq#*p3)d2`F(v*`WbH&d%#ug=sN?fM*(r}dx=|W;XwAJcE2k0n;ylx_J8Pz&EWWKRCQK}iX6X?27m9#% z6ccEX2e%|0MMX&@+A?;8&&qp9V1Cm1hAg*~8nq*u$5$C#B57<5C(K^1SDPtDHWZk@ zG`tUcj!*E8v5NQb9T@HjlnIygzEq#Vp4t_ZyRP8kRmm+n%s%(xDHCLMs~Raw(Lroy z&hRLN2K^b85K0Rsy@_jX6?Xg_QHc*Sg!eD@7?~O|i*0>=S>S1`pI(mWZdJe$=iyOz zdVaONR0iM$+e9jwZVOgxHM}}8L3n%T^gg=lX#s5IT8MSwWHnCtE5#P1gc;v zcIdyXi_Hjl!XG!-uHxba=DKh5mxIE27vommE2D^Li5?%mfM=^k2mGYtrOyse4XPyG z#;D)?3BZvnj2nvvnGy+Xzln-oUTnv)P9Z6$XnW%&>m?)~A-Cae*-y=~D7-uP877mJ zo+16?3NBYMM8@~r81;YHsjKaW-W!m8+1}Hg=4)YAVL%^eOh_&=wWK#7ACZ#phlgjn zix@P|^jz#!Q(SN}dE@<-t|V|5Ap$<*dxJpzzrrLz{`i$X^7DY{S86lGN zeB5$p3AOk?NgpZjJ=u8ng?U+zwYDlC0%4gu3KkiM6Sh~WGZK6!#C@Izb37$Wwe-o< zk}ADC0mY`I%Tax0WoY1ZN=`3#CwKnUdWPy7x0=|_N*|^i-u1k?~4b)jnl06t`CkMz- zK>t#H{k}@?!deu=pT*duF19^2lJ}XR(*pvhg4lqV9er=aC=XbPN1zaYFs!RWf{@Ny zFa$VDvlc~SZ7-5CW_Dc|xN97)C7M3WT~|x1Zfod?`1nRODA0_%p?{oa+R3k~VVJvd z`Oxa@h9{``-7Q}eHB49{j0pG2%V{QEeh)^(JAyi3=H?3Fj_0~Wx`~ZrE>W`gi0_Pv z#sD`<>M>d5)EiRBgflW}h|GtE78Qnv#fluc#eb{W>Zr}qN=A%kg{%*G_29!%4K|0x z_C9{!F#(~(jEP-3WH!t1wV!LlFbu_Wi_sL%;K&6H>lFdsSf_A0ntq2d)8yzBpi89} zMa}&k+-~LW8E|UDQFpn62St@|7z?DC+K&@S#o7HbS00;NDz_`2HZf7T0&%A&*aceP z8Nu?J45v%RbaVh|&6*Vzs?C{g6YJ*Jy!tA~LJ;AIF-Uc(tc5jSnXyMBx@rh$c_4g%wc3@3~vc2U!pz2m^at|H8z%p zfH|BZ0SgDvgL_`t&!M}N07TQQQQuM#Uqf>JvlHQQ9fDB?z!+T3XFAA#L-oE22}Wy7 zNW8oF6)~MG=INSLkfCRudT>We;L<7Ef~BGh-g7&Tso1J65^iqKw>X3jPLZQ?#xiPB z&U!Jon=hzLRQmPo0Db2cXyd~28zFUNw*f@5r1O{{{2HmH1}rP8dbA*jNq4WsM7Oqx zKX%qe0s5uB!70GOP>`DW%ZQsYC1+)J?SG267L>{YMRO~$gLLJx9(vxb3tX)_L8}h5 za(COJr4mC%Abbl8!wjpA?U#W>Vb|F;6lo(&lyk8!F?07vumfN^*kKQ;i>^Mpf|>g1 z_~_|-zTZ}*nUO_`eVNhL%Y@F>(%)0SsW%G5)V~kLV{oAGOk!_gU50=V4xTw#?aR4X zWjkca7yfr(ZjhfdBS?0l-|3q%Wd$q#=v#$!v_tMhfD(>HILVL z1>xE<8+t(~uOLemP=0XR9g4HcrMDC&NIAPb6kk6cHZ&((va8xI&t9fGU`(kJ3!OH+ z6&u;a+P3}H(B%w9`Mh?1*KCX`n(H}E{^i0^bd9sLLN-mK3y0VR$UFLKmYjV>7c{je zfVk#!0W4bWVJz?yK>1g@317|=xvH;0GRi_^89}tg<>~yNKzF+UmY99bm%*+TgqdRW zz7Y2qOH`1FMz_^v{CN4D=U;3MONot@s4MQ;a9u`;bxhFS91zmcQVMqsXo;CRA-r`z zjU%|HV3+bIV3Hg{!L@&%PctIG8DuAXt+&IY6g6ol+~D_E2HvZq68HmuCth 
zl>!{JpkC-lNCM^@J}ICBH@4?bC$#3%U*g0x$a==Q5XLr|QPuT$y|MN)W|7GQXW4;NqPXRSZur<#WXkZNe|*x-dE zwEa<|Qj){Nv<3`9W49VsxZhG#f88Eh7%rV09G|Hrez#NaQWsZtx7y@mF8Szn4GVJ2 zl0)y$q2f)^&h#-rC-NW9<7u0gN<+MK4B%>;x5h*vu- z$YZe_wA%Ceml2l95Co4;gs?9T3`)H1FbQtt84eCM#i8m}CNbb=tc=)H`x<0Bxh$&O z8xSj~_cGKA>`iuSbzK$W+GC1CzjML>iSXc$;k;?J23w#BjaDf?rEm}I5||- zutFbPcc@b^(Eeof2tm|cZ>!|0Oq#PVy6Q3VKJxZENA2i zMoSWYuXk*bK9Tv#n=i2^^*m&uK$`Sck)a#k-C@O~mq%-IU-gB|bAhpoHIGi)WXC|6 zl@M!q*`IV1;u~0XzmG|(ts)yotq}-d!!y0b8X2h`8xI~)Kt9E6;$}CZK@B9?7ZwR) z`I=RpA$3`0>NYG5kH9K>?OL*2&#c6*>8sc(x|hxK)amg72BMEvvQnS|zCOn&U+n zxbUOArO%8(PEQwnZkr4^xA8bGGGG-cYblZuCW+>c|8Sf)`}u0)<{gJAiVx#hDOZhv zCi4&Vo=TP8zNhu!I4yuS+iByRzWt&cO?mLuPO11-ct^^wB2nqDX1m4hETNrc;A;MQ zBRGnIbah0BW@txnyd@Q8BfA^5YPZ2#S&1FPHlv_9Y)a#gZuk$bn87>X2YSh0?bILI zDA$l&c0ZsG7bGeU{9>Hyy)w*R8yV|UqC3;U!6e1q48y<6gv0=*Y=CDL_83~ z)XJGnibqhN{I8Km4Zy1*znO4}(Prb`JPtGs9%kDs#lVi$if@*UjdRe;9|c5&T((Mz zrDM0Mml?#F)OHNs%8M9O#F&X1o~grSb4dH9^Vd za6(Oxg+$hT9>G5WQArueRBCA}1wLGz-HQD8j*>M4&p$JzGPuH1hKfRK?C*RCQyI56YAtznk7+q`WWp;fbGr(+AT-pCr5} zPba~MVcrI$2G;5>+DWr*+~%?ZZy8t2BdVRKM9%j$ox~Ak)IELqG5dn^cNx<`Kp9{3 zsXfC;$H$g5@c^8v3|PJv{@5X@Y-{TB_Yp%@3}4`8!5AdTZWD&TSIGr`9ze@SMb7Ti zwUQ2ewVYuhm%e)J{OPCBZ9|bxzKAke$bf(H0SFv!X5Qec zB|5%hocx*P%MeA78Y+dFaz3wdQ`Wi)_4l4A*?Y zb;`m0-q_o3d2L^>`fNieaTX_Uuf6@P;28FB_IXa0@m=W4S?~n2F#j${h`&Xpjl{n4 zEAJNg@~bZbpIfhoj?lak-nVKSfcNbU%F@lDbd<4?CJ_P{GG63gv|fM90Hwj?HM}fz z>YOQ2G?>|DWnkx+r<4O>YSv+mpX`6fK~>QhjfV3ZlEoW38m52XJSW&ykW+E|>JXY1 z8`d4nxqRFFH}w7W?5i`sHFMhYXT!TL|K9jgWyNHTG;(RhR>!oE3Ny5(WAdRC(-@Ck zDwu&?r#ul_ivy#O^3sK=yKb8M5BWh!)HrUMyU5BgT3eRL|88j}XNs_qs<%Ca*aj}J zNJNM)%ey{KJ)|#7=Q#ozk#1-QFp?yUy@FtRb3gNh-(}p(4GtNa^^VbfH8GN4Y%_ld zrzacaS55iI--_VS?@N=yw|*Hf6Zw!EN0?O`nBmQn)7STr^DagT`aA)hk?rI^tXghd zgl+nPMWo-h2`ppIl?YeKgtE`qZOjB~WQEgZ!r0XGP>d=7ZEerrEq&s!Y;Cy5oW(jH zu`xp0{8?qY5Qd!nCc`-Gd0LS<>2le=ZRuO#RB=58+8BsH7H*TGKvsW;=lT$1d4A%| zF>&1k?;aex#qv&xN;@-CJ`EePXd6L!woYpGup3;S{^X}J!ZW@8zkj($6uU@@)afw} z34Kif12g&66S7%yU*+FDL*2`9X-Ypz3gMEHH=S&-P{G;$|CB|0UnRmvCo_U_c z!WJtI?mvR0_P^Xty-O+su=2iW)BSaPc}D;&=x&UP8Gss60-hhzou{K)mzhuuK;?tJ zA$@|4U~7^)5euK=wfqD>8()$t|73Z_)BajB=hsW8IP5=$P`7I(j|lD3Rn0fv9(^O~ zYFcVA`HRJD3{MBqMS}2U)#p@G@zwJV*azftZHT&_)xl&!G|xDk?aw->vhGKpnRESa zMG;me(3}|-6_w}j8Oo{e^P+ty=LNV^$@Q*)9_=N2vk7DwW6Yp{7`+5zQgCJ;a zB}Cm^Wro*7#bKD$tweyr;4sRC4BuIOohoBW3FGf>y+apFihCy*Y#1&w%A^o(crGJZ z!eM~q=y_5DM$K6=^W+}FykoSjLM<5Vuu$?xbv;(xE~&(PET#}TW)HS-AI`>Sn}?*f3w zg4Flrvwe=c1h%BluGDa&Jc-=vr>z;BI~qUoh{oV)Xz^K#$mu+x*2dh}aRePRLsPN#p}IeeB{`XKnzZMh|_4jLMfO^CcZuMZ2NM1~7)ws4-dSOiWj-j9qF`;G@genlp0Zl|lh5N!ZAR3svrp z+gGhVj~VNlcqMuq1>jPV=5DKw}WWt zZU;L`7L$DjI^vnyqKrq&-)DVz^OsUM^aTZyE5*d4CZfIDh4allp~Dzv?6{50X9L6B0+M!X$a(Qr=a_-SHF~~@gVF&be1gQ*J1jw?=F0Wq?m8Yyi9iI{!eXm(^!wio z%mRox7|+y#&@;^tavf4HoR@Z$^S1tna$Q3n%fkD@K;dZN>()M*CGcf)+`e7f{C+U0 zhpzEXX5ME1fl{fRS_TB|x%bO?t+Af~W%|A@?dR&j^`2Z2Ld`*W0s&5Zsz_IhM}3DO zFuK>Z_ZLaG?XjMf@1|8p7tK-JGNx{LRtP3)S7s`#%S%xZ^6U-H4dAf*X` z>>0bhhOy0`_it7=g0wk<?aG{LQ&JVEq6_VDF-X&z+=+Ek zvnWRu%JFCsx*(ElTR$0T1vg9%T4Tm;(C0=atr^pOfYzQ6%)Diqd`aF}w!s6^f4n7s zX|YjajW-UPM8=+k?7J^*EbHXyNmVfPv@sDh8(E_OK_$E!CSB*rIn^qxf=AxZJ>n>ASMPsHJ$)?6vX|4in~?ZEBvd~rX9+_47MBkg&a(fVF03{_2e^*Dbr450(99?nd6fvB_FNy~e?|&qh z=vR7;_FskbD1W8uIr6;>i;ee+3XEOZmVRMa$Tlu73QE}b5+ru*6|3*NuY>fF*h^nR zIOTe;tcU7uFdRtupPz#I=c!6MlpFP>xmpIJ5;w|sZi*#9Ds!}LiX(!k6+7W?oO}F@ zq1?jmzXVQ*p0Ke{sExD|-?QHREq$v^mJ@Ysm$G}Xx7@Nh&&D?+;Ex~gh^A#oj>|+l zO64=gKc`e~8P9AZoAfi4vaz&lNg7>yPTXIoTvqpWseoJN(qn`#^#Pv>u|)*Wl6eC2 zWuxI!bCrLZSFE`urlu^}Az>v=)=}SVb=51$a&55uFEQL9km0Eu(`ifLfJErWu)nRq z{?w$~!IOq6_oi84&AI{!)>HnI81D}cCyI%4keWsqPKAEe!zt3b$4WgE7_S(oGot_Mo z-*j 
zx>6AM)`l5)?bLjva!byYbhajJqJG3@d+$+`9@*%@uFYl%t8PRu@XG*lP`zRizgvI( z6Wly{uesVej^A`@L770@L;hiio~cq*q1{Ud3ibs%fYoRg<^kU>r^?>7GCQv0NLGhW z-@8L%9fFRclDR>R_Z%d-f`)r_9xqtNJ^U6umn3(st8q7<)>g_DX7ir1yx&M%v zl5#7!qQ|)Mb3O~4(+b74DPnr*TFMf#HMkR*t76)5=lM+Y6KJhRR<(=Sh-k-sF4LJs zf6ulR$ivi`=2j!&y8+cMruc+v(9IWT6Yl+f>0e^zF01Yj1qPhFa7OHYj^ixcm?U}B zkK)W5l*(0DU9{_dHcaeouOGBuw%cS>YAld^6OIK1U4NFpN1v8#b}_2;BLGl?}B-5|!8QzoNK# z1S4rQI|(AaY7vyBXlMP;*&Lm*r?|{vL=d_}aZH)O;`^l{H#cOA!cONn(kz}vY#mfr z{^-g*h&Ff_EgtxX0}399ckj;wXQI!>9umOKUk8mUR+j~6Vh*>bQsHry!d&TW1sK>t zSihf9+`oUS@;p5HlTPHqQU}M_);;>TPKwpw%;&~Gd1Cr2tL+{Ci>Q=;vTOUNiQ>=l z=>PQX@po%2n_p|Ko~-rJRmK?+8nVhVvUtJmH=jfn-Fn|dwU7X>V$~^h`r5n@hy;jX zp>xRHS>GxJ=4n=ptX-5LcW3e(Uq(%yRX8KFJ~E=K6+0`c*&5qeAge`0HnYo7t8h_Y z)}I(72ijX~2-W$;=O({8X2{GRO~~yVJI4&=c>N-mRi*ZN`PjeJY(33mrQo0(B0eNh zTV}n;A)h9s;Cfj82O?(SI?e;t3j{e{z&9RZCtFt#OkvAvi?8wC${ze`T3t>oBu;9% zZih%W>lFJb1F})Z8Qi3;U%WZ)5m9I$t{Q~oUci^x?6n&B7cW*U{>v4k7Y#Zk#D&`{LcHoh(m^C z1?Jz21$TeJquvnWzx4~h@`?@@X~?-$GPM83#Vg%csB5h`ojT`xe5ASuc-I)4%u%%L zr=+1ZliS|YyO0$#BJIG6^4V*Y!DutpZ`O-9dj8|y@C?pQ2CJ2YehbU6C*D3RE)43^G*~P;M^o8PKCKE zE`{EDi_zRhMAJMYddFHjo>pllw&@$k!#ujY&Y-%6Tc&)FcecuOwoxS(BoaKKJ_h48 z_7LV|T2~md)_F%DeQ&9ev>}HhEFrhQqW}-kl}J^=Y;yGtfkwe#CUNQlVo56(z@nJ> zg(50Tq_ygnRSrNtreY`4`1KRDYa;iiIl_XrXvcG=o9^qg_f8x4UN>bgzyZ>Out^lX z)o3QwIzTf<=5|*N(1%21ScqU^)jsw$hGfvDnJEz>o`xYSnr8`=r{)HjL6 zV;kTz7PoVvmEt;bX_9D7jiTswo27Qji;md<*-tcaJ|5&aTfrv}GA^4}3DH4AM?%&L z(<3vWpd^QzsdNB+DmsZ`SLHl1^KPHVl#fk;vgXE`%n6EFZBZ$eV_6KcR<{x_xzMWk zc$1>Ik&Mx5k7H(scOPwZ8%*n1+a7VNEh>UUJiB(yujAVh%Lq;SKQLp+U3A7bp_Vl# z!x$!j9HjZEU|Tftl*f_N>|7RFHRfa4;{O`8C02+*ZJ-8^{%8l*zf_j&0DsHNb7!w1 z{Sl3mq7Ew>sD>k5bfkF zcT7YN+I$qi6+l3>Kz&mTrm{oAKhe4qJxfkO0Ucv_$J1dWx~x>T&i;fCKGaTTi2s1G zdvZLz_Is3fJ)t@VV{>SudPcs&c9E}G`0bg#F6)6eEcuSuSVOc5%Jf$wRzs?=;hw{^ zDv>ESpJH+rSl29;W|`>Sd)3CTW-Ua9tp%So%n}ImKGWYGcWU;B23@_+S>J5!G4kLcAYM=ct_w4YiofmMV zj7s>Mx$F@)^aGj5UtW@6zZYt=$?Y6LRIFr{?}cLxfWN~G$Nm~r-R%|cZ+f60SOk82 z??%j_@~$3IcXyiR&kzZ`)L`K^?=L3S4V8M(;!GQufX!<>n?E z2!#I{g^V75Q_$j4Eror!AqR@>i^2taH4@w4hs{Vyf+GhBK zo`ijn1rmP({B3xWYqRBpt?C041 zSfZjHS)kBAGB^ajit)-YSQU?w%Eq_vdR0E$y3wvVD2Ob6Qa!5|;`p-oXtN+#o1%EP z->9y&t53#A7`ut-kT`mT2ZS3$3xEN;th`(N@4nnWxmMl|(F?Fsf zHrpAmAOK)lUf8y`_E4X;akJ4S_juZ-bdG{qUdXb==EXpy2<=h$ z;T0)7HcoL(`o7tqE9sCRIo zBMS#Tpc5#TjH@ino?hUh!wD}aVtR0A15?*YhO+kIj@{+r8pdd|zHXMj^j2D>cSNKC z9)uOK6yiNA!xk0|Qv&(yEXUX>r|m9`>f+#~-zYn*SIkdfTHH4XMoqw|XJ6+ zMzNn@kz$id2vr68##Ii}-KSr>ijFzbamlFm{JX!Ob9G!37SCmeh0MAIV3b84BTOVM z(3NR~j*fhea}=U+Y#ih;*oX{Q{j#@Fk`2F=O2**}yFs^G!45=FQBATB0YrBnQ-NXR z(}GF?ON|gt?fYTxd>}$a9~BMsm#7+zvzXF7yAI=Tgd1pN+c3SZ=13On&}#~0giH(& z1!@XSxMgMcW%3VXBv?%^;6?*Mx=bH7|gmD{0 zfBtec34;1&uMH9bEQku1{AvkJ~>mz@uP?MO%V2bZyagtU3k!CKCzo z-}Fv1+s{&(JJ9VIVo=eINiPuLTMgj{=wq3e@jagfM(fbH-aVpV*A~T%CN<^$=KJL` z+7}UKMQei)FBK+q2A=mmo}Ge^Lih<-pprSB`o__~=}Xh&)%D`SgaLeKV=uwdO{<~G zxLdcrls4JAGdbL*=h1e171wer-=kybnDX7!;<-P5A~t~`)d39-d!JQAa*Eh>GlaK$&dSQ8Xm&9qE*Yvs-)c!k zQD27|8=Hy`@+ux~zPvNTb zR&47ByA3+tv+sS;aP*LgZ~X}<|9k^$fS|MQGI~hqZA#(3;zNg$_PcQli%Oq>^3INi z&E#P!K6U#_LpuU(Me`F-a*H18!%qOkt%20qT;H_h#^ytL19Zso#v4#vIAc{~Prfp4 z&#`P*C4rHn;#&qE{k}T|&QG*7={8(CrjL1{KLIa%p%mA1Un1f1 z`g9d!0{S7n#CmezPfETxs3(5M35UI7t$WEi?4yGIDUUaH5)CfJCXvG|w*0W zebf6~*$VAY&2-M^@9N`W*Lhr}lz-ec-J{Y|vVaIKBs!vyvj{E^;_JkyP3a{;@ulIQ zSt|zK%s47%3t>Ky9tmuqA3>sJ+{A6;x1NUTOH`YrdbC?cMn<1_4F+>ZG~(R371$3h z9pfzYQCijULpW}>PQRf}xxZ_4Bl9QVq0K@#qLKCC!lo4U`@_N->CJmeHX*g=p|}&m z@BCnw=I1qULmO&k{C{`Z>;Qhd9}Tod5!?>?B%0N#+3im5nG^b!bwL#Ozls0rXFv&* zgc1xhdF{l)&|`@yqiQ5$DzMb=p4Ri@ytL0(xi)VbO?{{>pS_{i&{1ml-|YWBlmEjP 
zqOPMLRr=ZlP8SX)ATxjCW$9r-z`%FSfY-blT@F9kw-SyY8tiDG-$C)EAEi?P;64)> zbNQ3ow6Gmy4>kc5hWw%7yVKJHCXj$xc0IO?hp9#&2g0+0XRPA!O=4$*efxHLf;U2r zH_^-N0=Y2sGW}nd;P>d1-)T+@67P~V%s8wu(%0<9@%KWdI2s|xiT~Y;WRwF^z}49G z24f1V#!CW)*B$b#8d*`%0pS{1A{>jhLpj&@(UdF^uJ48Di-7IWY3jENxIcgZ`s5PL zB^v4qsH3X~rc!@6XSnl@kVChS!y)rInuqKEj!FtxNFw48R4Am5%dN+4^8M-Uk8fJs v=l^txSaP4w9DlacsBwSO=s$B=b>t=cDIt=i_JTWsBnH=f~~n=Im{4>0!(5?q&b=SezUMg%(9cUPj+P z=P1`O$rzEf{vHW?N8tczTXb>L&|Q(&fhdfUt7I@4TC-h#win-tVX07MhmG1)w5zl0 zWvP_@O)6*PsnBO92v9NLOKnz?QzWVf_TvU5635(y1u*c_QJ0&&8n!33N6_wH)!`jIpk40vwn`sgL=kMlEO zpmIgw5DXhES((KPTu2#RN5tu%*taeC$I6$Qxr%RC=I=2PXvymlbr+=9f^`NmjF>W7 zAMA)UbL1z@jRUnTd1#Y&#=3jezer(Wu~L89)8qBW@(Qv%errKYCx9~tw>U-z5n)G< z6E|3|_dLx#DGDy3%SrsjWbKPO4ZWp7oyxy?nRdpr^>D3)WiKg1CK>&ADsK3eu_vnw zMfyciajLB{uP2f1!Sp3wL`Oju;dOW_4gD_#`O4_JBhc^uN3j!XVQxJVc&DY`cbv1nE|Q_J?N_g#wmW#d?6a?9b%N; zar)fbSVWvWwIlrM9!BJuoNulw{w4KoIKstm-f23g?`PfZvOA(j`Yqd!~Yt$3Ozum+arIyr5rjh9zSZEfw{ zd?Z{fydX&P@!yP=3M0{%vKV9lX=KmF2CXcXzCo!zJ6*Vig#`*7Xu>X4pIw0v)L_K* zYY&2wVQaZ8m|d1Uw!^ED8v1E()o)z^0UaD16b`wC#1UoD^iT?vflyJ zohQYPCjT!a?BVW;9|CFEKGPCE1M&0oo3wgG0ApLi z_?;)I%8W_6zgz?>-c;cd5CHQnbJVTSCzMTsBRx%psBJQAUkeK>lSV2p=6wJwSXx+w zy{U?@B)#rT+!AJum1Rzi5fi;V$$uNISo{J$60eAkfRd4tN*m1Ky>GEKynbBb5+(Q{ zE*Xk^a8s2G_{&&dU*CS(`@Cev@zjEAc18 zjDh)TW9z0iZ=|N%T6;_pt3LZHd@9lY{(jJ&Pxb+#2Czf;mT>#OUux3EySfG!c^;q5 z?LjhA6R@!zzUnt|fR`W|3|MFo*M~!H@V6o zE(Eh`tTEBYe0L9Oo^tr@rG7_A-JZ2co*=NBR(lC#VJ59a&r@Ht1$sI;#ah)ympRTl zHNF$YI29*g+lka(>-mJK{YpkM-}tKSy=j4?Xe=S%#h+Cs|0=a%2Y&}C-22aMJIw4) z);iQRHH!{hYqWt~QBj_^4`gTLWUjTp|Cg^q?sXw|zU1-1ZX1ZiAM)hIEv}&z5*2NO zy}w4h;P709@uQ=o=Lma#bZW$DC|wWin0(IDPH6s8@J)T@NN;S+J0cz#it_n)!Y>a2y5mTFj!eRJ~5FzVfWK*AJKy{lt`19 zcy+k=k_HZk`%xj!<2p4pX68fI-iNud7L}(qyDkqlxh}skFtkTjKil!jk}dpk7_i)A zeV|{G=?*EXux~|O3ox}=T2)^F+xVPqDoo}}B_adBaSBe2Lj`0*9h+2}?Sr6ftm6zALQWzMJ(a<34;gWq24X}dD z3X0 zCYyg5ulQ+XL}hSj$kQ~_sd0387;Dby)mYsy^4uXi($)|pBTb+D&a<-Be7DZuwB0|M z7R$1TF37gedy@+`t$@_qPJZxq_F&;R;wB>tCQBrzN`BG z%=Vi6@$m3;IEhDgyh?Q06crUosi;b|SuG6bE6JOvfdn)j`0;9#n`WL7IAjP5D%lf` zHVQ1F&@i82Mr3al5A${^8W_mJpOr_vtszo z!!VZmY}j!_kn^((zEn8ME4gdo*n~2yoDqt&wfomz7D>tLkC;_Wi)sb4rweY`phCuU zRjVNG4}>KKBkPlq;%NjTQjVV9og;4vaL*fi7hz3m&)jZ#J?I zTf9DQMo&JMBuWzJk;eMqxF(q=>h)HUU9H@@j~92ap6-K#63oBI)hS)~7w$%1K>ktW zWCH3-F7WnC(Kh;mKC(^KS8}Wnm{~uMR;vH?Yp?a2;%gC(o0dM_&k=pQ!nd=O`6bL) zeMylA{InoT!VQ(4pmO;6a7?j{d+6Zgk>MXN@__n>wY=rrtvmbj07rLN)$tYVZGdMK zS!Q))N+zkCm3n&sva$UyyjJU;V&&6&lm;=nWaZNyOd5}kZd4$o;D?8tLkS@-=sM^j&TbW(fhLp@CiR-vtqYL#xoTj=btgAX*ixa8M6qo~FK*`Eb}oD+fa_@+ zN;E=`UHUaUky1pKG*bNjv=61_c%MNUw{N#-+i8Xc{Cqzy%fieFM8ExfK*2hqGR()+ z2b68;qvN%6K~uleTC=76s?VX>7=iau`?A!AjcLv)-9<1ijgJ~PcRhioC_oTasdE8R zt&-tRwm>LcY|uebvsp9X#;oOy(8BHPHtF zoem~}LuF>L4vE*$nBJ}h#?QJ3KSMyzN@6)?FFk3o(FqW*)L2DQNBT1w13NSe_Jes( zuQ%rjp_~fLse^fbn+I>?Mt-Ppnsp4@)&Flu*4U3}DT>9@3L|B7Y*?=}goF>W z#XD9}432bp^h>$0$l9jug4z;tIu!nNGlbir$$^IPRWBoAD`J-?N19xVTevY%c>Yb5 zCXfU!G>zS4Y|PGb;WJAlxYJnd&l)o(N_|2Gj4yLr)_+hgJB69b{)zjd88o#t)7-N2 z%>PZg3-hzQIG!F)mldN|3=Z&tJ)b}W)TpJ7d6|>`Yx?kLF;1L`qY|6FphMn`w@egF zryfVdyK!#g3aXccl^E5;ohQFdZo%iZJilAHWkLR&x^;1}GIyrm8BJM;V&0iWgBmlo zv^`8Ky)}#~a5V;Y_Wwa9d+eu2Hhz24=Xa}TvyMRK_`4YYnc@Upp-y>_mng+8%ryyU zOyij>502woIJ3u?7$+FwF+E?PB(7O5^wZR20#(xAxJ)9iVpV($Wd2=O8a%@aCd%TJ zsrul3qleLT&DmMh#MTm(Qd}3BCjR=hOsk03nXk?s8#tiXnUYk;Rl;HlB48* zws~zTs-~|(bSuDl$ijob!waCq4S6b;&+N8!7IOD`wT@4G6;feNEt%O9R+cQ=?k?Yn z5SKs+CEOInmz~bgtt*=Ge~9|BbytflQ(<=!C%G%sD){zAHf}6@S8GJf@7M-F65UqY zjvGHW+DH|NC$O)5mNs|inqMsEDS2n(=3MK(r3&(5H$YhT^YUI%F}HXMP`8BAf3PGC zA1v}9bvoLbg3Lo$I8?P*;1d%%+?gt+x>$6ZC?9}Qw!6y;;AaXUHvsDbVD!mU>QqyQ 
z72M|1onZ^r@lu@r;gXqFX;I0ZjjJ8VYN+=N?E_!kE9Cu<%@yS#7QMa*v_oYM`?{l# z!E@0oZ3P>CzJ%_!&wP0Z9<62ZG>!7EdHU@BxaF>QyT^gsbsF#g0<8hlR7OBp4?0}F zRHYJJ{))gz3fGg00s*zIydX}HaCc3^mB?*(M!=xWS~ZxW$Zc)a640{eYp@JQxcn}i zen(tN$BhVielFe9cwBTZ)-UybsQd$lnP%2 zYfNdqH^(4r_s%fwk-Q7yn z7VT#5b8-mR=ie_%++jLx$F=?)e@8h?+Gg!b#ecZ3OLjh9)YNR!IV&M^IA6t|&4@t=dCk2+qzrj!#={;`RJ}Rr%vdYCPNt2c_G=l3J4a z=DhGg$k;1MN=a3Z`k4Ko;G_Ode?q&+W(!h9N@YSB9UpbaB+aJ9#;gghC887+h%`Ay zPvdi3$w%Dzp>nIP3nNxT*0Fc(C28+66a>GPF1lQHh!FJ`O`26lmC*2I;YQ(8#lH8O z9Fh6Po1x}uA*?W;7Hq2&%aL_d!eeG#6H4ukG=mrntppJg3rlZ&8wdCEejjQJ} zeVXrMa6Lfqd3!aiU@m=<6cQah=d9E|mJLG>V`kLl4lQ_)RJgM^YujWkJfS00R*=Dv z75+>Q1zm;9W7x=~8B%ATtCsvN%p7d_@eT#Bd!t+WQR;H#c*@ZWTvI>2rA&15E z$I@CDZ9e{VprSs)nW`9;1uWE(1pM;%lMm{{$Tor~mI14-y3;ErF zsF0mXLklT1nbx>(RcUZZSgL3y6@LWFV3w+~VSgtVdPKgrJL!6=2{~wg`7O3dWFq?o zzz!@Ii1pPqtledh8yPy3iYh_WZI7&Z1`!dhYFHFuzZ-hLaG#1#K~YOcDF1E*;8@lx z2K3;gxHhu>xL6P*jHK)9>+=)~UN4_>8hGvSKnZ?f)%Z>6^rK*uhbajiDDULOY$a=g z9G3d~6ZlB`=}*%uv5bLXsk@mL7btt$;rd91Ex@srnU@v+UEnl#7;k&Ewn${DmyOj> z!lRzAA88VYz)@?feyq2y)tRYMQ&N8Z#7u9k9IpA$QUCqzDIYF6l}Z7b1Db4K|BQe> z+|uHS!9gkE<(G;UYW_@?7SEXeGy8c)|7W37U8#B%`rxoL&53<{9=j{VIsneeS zRu@M)kOdkFmZX;w2;-(-Ih2dPGJ`aIR#ZlEUHhR}5#2)S4Xc=ECGusuSPC1MmfyMe z?h*<{3#yLibm|IDr~ttNj${Mlfa3=4mU^ZxRmmbh*czsd+2Z!m=DO8y0D?mOVn1X; zxZTgUDHkl5tJ#b^ZxvxrR}IcjOEIceY&41=b6Gy#v={^UxdkaklPfUOO>ve;OYQ16 zTlVF;u9e^CoFL?QDC+zlvXp5*+!#?QO)KN!H9y6L-#De))i1uGii*8f6a)Laxw+w0 zY1lkzu6@@wfMRpJlJ$xAoq_!H`XK$YUd%g2K3mq_G+E+NbH-R6y(=|B5!EjjD!>00 z*-oX11AS&l|0Aox`5&#v3`>~+oKGSqD2yo}?CMZk- zAM&g$dm7d**VyyY2@8?vdNM}E;b;)R4CK(TwcOX+^V;(t<_L5|UtI;A8R8pYret?N z?;-d-YttRYe>5$&oI<%;7}9yD0x+r*L4*nEr9~e+@tQY zzgJVF>_yr@ifKXO&3X!YORj28ADdyAxaB7y#?A>Q$_niMKJu1+X1rerg;DRFM5Pxw zYuw3(e?*z)i$Y`7nE^<}NECwyW$DeWDew=Q+j8%^>vMaxzff!KX;nX``H$eo&D7jT zcA4ugTAi7Ho}c-=$b+cNUZd%96BPwG!`Oq$FoAPS7OQq&0YRWEzDfbIOdN3b_03Q% zm1R$N8d7m2my?rQX%FD{1%LkhS(`nbl$zSk&MuM{myCiU?%ZO(ZBmY4l{pbgAP$dv zm;)ZJ)w)AHrp!YQ#MlL4m#du`u_0_5Hr=YD4>xkt?*Dvs|K-%U8L_IXY8HGpARu*s zzy?r&JiSbXK7BrXXfV6$W{hS<%6b^xH^lu^=>5$nH{a@U6D3Hcg%Fk>wfs1oFyY9y z(+Ch)zNA#mvEKye(PBA3$XR2Fo@)bS*{XJUTs;UAtT6)^0rMhI;*wp#KWWo(Yr4ah zFl?1};Q?zuoj=-$EQ?GY;-s#j0myI}w#mwj!!~^5I2=CX00D%& z{}GBZ<7r5~T|Iza46Tl0iQ=S`&l+mA4H|!@_SH(THgmuH6Fb~%mC^nQ)BQxtY=Zt` z!rJvNxVS2nXHNEUm4aLAMD;Y+S~7D?aA=TESYq*Ckt=r57VFPE!jG-3B3`YO7lBl! z2!MMFC)8W@re$T3{rU5!CGdiMYisKat(g`}*kc35z3V~HxXHy5z+@(!!DMVqohST~ zlAVQY6jHal44h+D4i2oEa(j1@z(X3=Au6P>xI5qs4lAJx9z{4%TdH z6}hb=XMsElc&$6@NW%`F-vvDooizvmD*}2M<&b4yV0K|49U%1O==~q!y?O{JdHkR& z`V&wDJ5Rh17hd=BGJc&%(JTkVkTRocuiYs+q8QLcVD8!3neRK7`c}p+WFW&Rk_voX~w1~6<8<9 zSc=`lRA{nimF$l<520&8o$lALpf#A-0KDs|*v4JyeqI5DHLi6%I zM!sraa)+Y2-MfUb-lQq0hnvY@YiPv%d6x!h3}bI#O{7Ebje0#OCu>-<2Q@RQ|=Z+Wv!e zM?lRRW5wbG-k0I&hT#WEA%9LTLSoWYpDj0SwN&qt!_RO3obO*9rcW9sBs%W4`xf>$ zeO9C^KCM4ECyku!Z=dM8-V_8Yd|%3%nH#b74m&x1!VpXOSfkpKy*4TtJp)lV4YbN| zT`mJ^s@eDGN`^PVut9gQ9SrwBvIo0lgVaYsABZ)8bKGltJPwAvU z4{6SJYuoCb+x68g42Bc>Z;x_aT%0oMSz;N^m&l$(c&7CC&=}l*K7Sh+vi|AYbtHAL zjdV6x3?`cdKT&ax#WthH>Qb$?2c!Zxkvp^RDg;pFS<_VkJr9ual9iLxnE_$2%TpAxi-$L$qEdX(ox6({bHrJ-%w5Sd4VKxxA_%Q%2o@hJ^R;$8%Ue2#_4-DRE3Q72o!??_mG+ zfJyvU5Dla|H{$wEshJzQKe1EnSsQfZu^8ekUfxP5>qz{o+r`@8;!=S<-Dv)aGLkNn z+f-Hu?0d4NLE7YPt(dPVZ#ylJYs-h*YI1cdJJQ@#Nvaq$Cr=1EYE82}hec_4g|@_3}=4MN+0-uzTQM7*wHhsiDyzP|ery%#^t9NTVdhzc zLPcHK?=su&rV@Xmzo%%rT@?*c9=T*s6w@W6vK*jK?b? 
zltaIP(&NYFZ{SLxhbiIVXP%LK0taw`nKvNV_foK#4oY{NIJmOa?Fzm`${b}$}t0$+T{e%shWLmO~Cg1v*y+i!NUO zcBjSc5JVp6{x#dDJIMpv+!{OuIyL9#GnoNvIV@+}Z`+PLD10R(B&aY+vdb0`-)eJu z`ku>@e}J!>eLt;MQd*8fm|(7Z|A6NYsxhiaoteC)&)fOTQPmjlp8W&Yz(52r@co_U zBD7#%xNv*8K>m3!TDi(mui+?cW3v>y^YRe8|J1tKRCp}_mT5|Y6R{w*LWUP0=6h7c zx0+(|9da{YT!eX-Ftz>Z@D^A6tnj6kTo5+RBwSLnn z(}=2mk(`Cna0cc8DH;+QMThT81I#iq^0rV~wBQb&ZtTT-))I=7J|qr;oUZ2PK;WPFLiby?SDM)p)zUHV@`Jd=NKr1o4x}hsz?{kiVvFfJW|5So;Tui^MbYbf;~E{AzH_vO?k%NJ z?!3U;05kSoWitAmU%93hwcq)v1kOxkteJ=l}9ZE zSuB2*%?%7_-7DC~Fx(pe{;Tw0r;hv~x7_C4xFvSKXkiiSJ;8&HBHlu_wh9@}lTT1& zxMRNyK}KEcWS!y?NA`Y4#&v7$5~GWksJN__4MdxDG9+AMy*Va+q+bE7Mlx> zxP3Ft)BTC9SC*U=KbB`bONcoQYH1h#lQp|O}c%e0m&`?QkG4DUP<(j#%GfZghXM+^&cgH_HS%F4h zj+kFU^Lpr4-HO?PU+xGPAao)Rb-@vK^&@*T%U-$fv2D7tcPsD&WJMc43 zC#v<@my=>Ar}M_{16LXv8fS6l_w?LxKa8B#AZ&Tm7S^Yi1feYf3l~|7NuN=R9V4_= zAIHSF4dqVV^Lel1v?TZx#X>gJf{tBNrLrB7gkqAFf8WFXO}>Q(BcmsM&yJf-}nibcQ4K@Yc?Cx_d5Wi)gM>tf0 zzCP3WDd^2zllnErv?hGwmPp9?XP~F~;zi)G@fph4R;P|v10%$U_X_AJ?_6nhD)|@4 zx{4nMu_*ap%)PZ%`^AtbUT5{@Bj!6YfTUQvxP0fBvefhWJ6)`_r6GM{yEFu62yC`& z-`cp1m)+J~0s5;ce=*-ltspcL+BFD%B=1gaK@-oUPnY~zyPq(v;UMQym4iFrvmX={ M1x Date: Mon, 29 Aug 2022 11:15:06 +0200 Subject: [PATCH 068/421] There was an error in the docs around continual learning and thread count (#7314) * Error in the docs --- docs/freqai.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index eb76850b6..140e8acf9 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -129,8 +129,8 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int.
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. Users should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string.
| `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string.
-| `continual_learning` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int.
-| `thread_count` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool.
+| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool.
+| `thread_count` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int.
| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int.
| | **Extraneous parameters**
| `keras` | If your model makes use of keras (typical of Tensorflow based prediction models), activate this flag so that the model save/loading follows keras standards. Default value `false`
**Datatype:** boolean. From 2493e0c8a53447c552601a645f18ea0f5172b5b7 Mon Sep 17 00:00:00 2001 From: Richard Jozsa <38407205+richardjozsa@users.noreply.github.com> Date: Wed, 31 Aug 2022 16:37:02 +0200 Subject: [PATCH 069/421] Unnecessary lines in Base4, and changes for box space, to fit better for our needs (#7324) --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 4 ---- freqtrade/freqai/RL/BaseEnvironment.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index ef5b1c107..d2b92a954 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -66,10 +66,6 @@ class Base4ActionRLEnv(BaseEnvironment): self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None - elif action == Actions.Exit.value: - self._position = Positions.Neutral - trade_type = "neutral" - self._last_trade_tick = None else: print("case not defined") diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index bba3c4a1b..bb43f5300 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -57,7 +57,7 @@ class BaseEnvironment(gym.Env): self.shape = (window_size, self.signal_features.shape[1] + 3) self.set_action_space() self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + low=-1, high=1, shape=self.shape, dtype=np.float32) # episode self._start_tick: int = self.window_size From 240b52953355602f096230c7063916905745edce Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 31 Aug 2022 16:50:39 +0200 Subject: [PATCH 070/421] fix tensorboard path so that users can track all historical models --- config_examples/config_freqai-rl.example.json | 4 ++-- freqtrade/freqai/prediction_models/ReinforcementLearner.py | 4 ++-- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index dc7c62e4a..0ba71cdca 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -72,8 +72,8 @@ "5m", "30m" ], - "indicator_max_period_candles": 10, - "indicator_periods_candles": [5] + "indicator_max_period_candles": 20, + "indicator_periods_candles": [14] }, "data_split_parameters": { "test_size": 0.5, diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 0e156d28e..2e359d924 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -28,14 +28,14 @@ class ReinforcementLearner(BaseReinforcementLearningModel): if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.data_path / "tensorboard"), + tensorboard_log=Path( + dk.full_path / "tensorboard" / dk.pair.split('/')[0]), **self.freqai_info['model_training_parameters'] ) else: logger.info('Continual training activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] - model.tensorboard_log = Path(dk.data_path / "tensorboard") model.set_env(self.train_env) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py 
b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 9f6a66729..e74423a98 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -31,14 +31,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.full_path / "tensorboard"), + tensorboard_log=Path( + dk.full_path / "tensorboard" / dk.pair.split('/')[0]), **self.freqai_info['model_training_parameters'] ) else: logger.info('Continual learning activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] - model.tensorboard_log = Path(dk.data_path / "tensorboard") model.set_env(self.train_env) model.learn( From 27dce20b294e2388804992882dece3e33d4a4fa7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Sep 2022 11:21:54 +0200 Subject: [PATCH 071/421] fix bug in Base4ActionRLEnv, improve example strats --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 2 +- ...c.py => ReinforcementLearningExample4ac.py} | 18 +++++++----------- .../ReinforcementLearningExample5ac.py | 2 +- 3 files changed, 9 insertions(+), 13 deletions(-) rename freqtrade/freqai/example_strats/{ReinforcementLearningExample3ac.py => ReinforcementLearningExample4ac.py} (92%) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index d2b92a954..70a625136 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -31,7 +31,7 @@ class Base4ActionRLEnv(BaseEnvironment): if self._current_tick == self._end_tick: self._done = True - self.update_portfolio_log_returns(action) + self._update_unrealized_total_profit() self._update_profit(action) step_reward = self.calculate_reward(action) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py similarity index 92% rename from freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py rename to freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py index ec0977455..d9932eea7 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class ReinforcementLearningExample3ac(IStrategy): +class ReinforcementLearningExample4ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. @@ -106,8 +106,8 @@ class ReinforcementLearningExample3ac(IStrategy): # For RL, this is not a target, it is simply a filler until actions come out # of the model. 
- # for Base3ActionEnv, 2 is netural (hold) - df["&-action"] = 2 + # for Base4ActionEnv, 0 is netural (hold) + df["&-action"] = 0 return df @@ -119,14 +119,14 @@ class ReinforcementLearningExample3ac(IStrategy): def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] if enter_long_conditions: df.loc[ reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] ] = (1, "long") - enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] if enter_short_conditions: df.loc[ @@ -136,12 +136,8 @@ class ReinforcementLearningExample3ac(IStrategy): return df def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] if exit_long_conditions: - df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 - - exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 1] - if exit_short_conditions: - df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit"] = 1 return df diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 15a263b94..2118e1221 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -107,7 +107,7 @@ class ReinforcementLearningExample5ac(IStrategy): # For RL, there are no direct targets to set. This is filler (neutral) # until the agent sends an action. - df["&-action"] = 2 + df["&-action"] = 0 return df From 48140bff91e9d0c5ae20518ed8dd2963b89506d5 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 14 Sep 2022 22:53:53 +0200 Subject: [PATCH 072/421] fix bug in 4ActRLEnv --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 1 - 1 file changed, 1 deletion(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 70a625136..bd5785b85 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -33,7 +33,6 @@ class Base4ActionRLEnv(BaseEnvironment): self._update_unrealized_total_profit() - self._update_profit(action) step_reward = self.calculate_reward(action) self.total_reward += step_reward From 8aac644009dd7a8ab8f006594b547abddad5aca9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Sep 2022 00:46:35 +0200 Subject: [PATCH 073/421] add tests. add guardrails. 
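The "guardrails" referenced in this commit subject disable preprocessing options that would drop or reorder training candles (SVM and DBSCAN outlier removal, shuffling of the train/test split), presumably because the RL environment steps through candles sequentially and needs the feature rows and price rows to stay contiguous and aligned. A minimal sketch of that idea, written as a standalone helper rather than the `unset_outlier_removal` method added in the diff below (the dictionary arguments are illustrative, not the upstream API):

```python
import logging

logger = logging.getLogger(__name__)


def disable_rl_incompatible_options(feature_parameters: dict, data_split_parameters: dict) -> None:
    """Turn off settings that remove or reorder candles; RL training uses every candle, in order."""
    for key in ("use_SVM_to_remove_outliers", "use_DBSCAN_to_remove_outliers"):
        if feature_parameters.get(key, False):
            feature_parameters[key] = False
            logger.warning("%s is incompatible with reinforcement learning. Deactivating it.", key)

    if data_split_parameters.get("shuffle", False):
        data_split_parameters["shuffle"] = False
        logger.warning("Shuffling the training data is incompatible with reinforcement learning. "
                       "Setting shuffle to False.")
```

The upstream method performs the same checks against `self.ft_params` and `self.freqai_info['data_split_parameters']` inside `BaseReinforcementLearningModel`, as shown in the diff that follows.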
--- config_examples/config_freqai-rl.example.json | 2 +- .../RL/BaseReinforcementLearningModel.py | 41 +++++++++++++------ freqtrade/freqai/freqai_interface.py | 3 +- .../prediction_models/ReinforcementLearner.py | 12 +++--- .../ReinforcementLearner_multiproc.py | 4 +- tests/freqai/conftest.py | 7 ++-- tests/freqai/test_freqai_datadrawer.py | 2 +- tests/freqai/test_freqai_datakitchen.py | 10 ++--- tests/freqai/test_freqai_interface.py | 40 +++++++++++++++--- 9 files changed, 84 insertions(+), 37 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 0ba71cdca..9dfea932d 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -62,6 +62,7 @@ "train_period_days": 5, "backtest_period_days": 2, "identifier": "unique-id", + "continual_learning": false, "data_kitchen_thread_count": 2, "feature_parameters": { "include_corr_pairlist": [ @@ -91,7 +92,6 @@ "max_trade_duration_candles": 300, "model_type": "PPO", "policy_type": "MlpPolicy", - "continual_learning": false, "max_training_drawdown_pct": 0.5, "model_reward_parameters": { "rr": 1, diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 77db9c655..f822208f8 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -21,7 +21,7 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions from freqtrade.persistence import Trade - +import pytest logger = logging.getLogger(__name__) @@ -45,7 +45,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] - self.continual_learning = self.rl_config.get('continual_learning', False) + self.continual_learning = self.freqai_info.get('continual_learning', False) if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -59,14 +59,30 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.model_type]) self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] + self.unset_outlier_removal() + + def unset_outlier_removal(self): + """ + If user has activated any function that may remove training points, this + function will set them to false and warn them + """ + if self.ft_params.get('use_SVM_to_remove_outliers', False): + self.ft_params.update({'use_SVM_to_remove_outliers': False}) + logger.warning('User tried to use SVM with RL. Deactivating SVM.') + if self.ft_params.get('use_DBSCAN_to_remove_outliers', False): + self.ft_params.update({'use_SVM_to_remove_outliers': False}) + logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.') + if self.freqai_info['data_split_parameters'].get('shuffle', False): + self.freqai_info['data_split_parameters'].update('shuffle', False) + logger.warning('User tried to shuffle training data. Setting shuffle to False') def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs ) -> Any: """ Filter the training data and train a model to it. 
Train makes heavy use of the datakitchen for storing, saving, loading, and analyzing the data. - :param unfiltered_dataframe: Full dataframe for the current training period + :param unfiltered_df: Full dataframe for the current training period :param metadata: pair metadata from strategy. :returns: :model: Trained model which can be used to inference (self.predict) @@ -75,7 +91,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): logger.info("--------------------Starting training " f"{pair} --------------------") features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, + unfiltered_df, dk.training_features_list, dk.label_list, training_filter=True, @@ -99,7 +115,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk) - model = self.fit_rl(data_dictionary, dk) + model = self.fit(data_dictionary, dk) logger.info(f"--------------------done training {pair}--------------------") @@ -124,7 +140,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): best_model_save_path=str(dk.data_path)) @abstractmethod - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): """ Agent customizations and abstract Reinforcement Learning customizations go in here. Abstract method, so this function must be overridden by @@ -142,6 +158,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) # FIXME: get_rate and trade_udration shouldn't work with backtesting, # we need to use candle dates and prices to compute that. + pytest.set_trace() current_value = self.strategy.dp._exchange.get_rate( pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate @@ -162,7 +179,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return market_side, current_profit, int(trade_duration) def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: """ Filter the prediction features data and predict with it. @@ -173,9 +190,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): data (NaNs) or felt uncertain about data (PCA and DI index) """ - dk.find_features(unfiltered_dataframe) + dk.find_features(unfiltered_df) filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False + unfiltered_df, dk.training_features_list, training_filter=False ) filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) dk.data_dictionary["prediction_features"] = filtered_dataframe @@ -305,8 +322,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor # all the other existing fit() functions to include dk argument. For now we instantiate and # leave it. 
- def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: - return + # def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: + # return def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 3b10933dd..7b35cd918 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -553,7 +553,8 @@ class IFreqaiModel(ABC): # find the features indicated by strategy and store in datakitchen dk.find_features(unfiltered_dataframe) - + # import pytest + # pytest.set_trace() model = self.train(unfiltered_dataframe, pair, dk) self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2e359d924..2e5c9f97b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -18,13 +18,13 @@ class ReinforcementLearner(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 256]) + net_arch=[128, 128]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, @@ -69,8 +69,8 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor = 100 # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: + if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral): return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: @@ -85,8 +85,8 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and \ - action == Actions.Neutral.value: + if (self._position in (Positions.Short, Positions.Long) and + action == Actions.Neutral.value): return -1 * trade_duration / max_trade_duration # close long diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index e74423a98..c14511921 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -20,14 +20,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) + net_arch=[128, 128]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 2c6210a0e..026b45afc 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -29,15 +29,16 @@ def freqai_conf(default_conf, tmpdir): "enabled": True, "startup_candles": 10000, "purge_old_models": True, - "train_period_days": 5, + "train_period_days": 2, "backtest_period_days": 2, "live_retrain_hours": 0, "expiration_hours": 1, "identifier": "uniqe-id100", "live_trained_timestamp": 0, + "data_kitchen_thread_count": 2, "feature_parameters": { "include_timeframes": ["5m"], - "include_corr_pairlist": ["ADA/BTC", "DASH/BTC"], + "include_corr_pairlist": ["ADA/BTC"], "label_period_candles": 20, "include_shifted_candles": 1, "DI_threshold": 0.9, @@ -47,7 +48,7 @@ def freqai_conf(default_conf, tmpdir): "stratify_training_data": 0, "indicator_periods_candles": [10], }, - "data_split_parameters": {"test_size": 0.33, "random_state": 1}, + "data_split_parameters": {"test_size": 0.33, "shuffle": False}, "model_training_parameters": {"n_estimators": 100}, }, "config_files": [Path('config_examples', 'config_freqai.example.json')] diff --git a/tests/freqai/test_freqai_datadrawer.py b/tests/freqai/test_freqai_datadrawer.py index a6df60e61..1d1c44a1e 100644 --- a/tests/freqai/test_freqai_datadrawer.py +++ b/tests/freqai/test_freqai_datadrawer.py @@ -90,5 +90,5 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf): df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, 'LTC/BTC') - assert len(df.columns) == 45 + assert len(df.columns) == 33 shutil.rmtree(Path(freqai.dk.full_path)) diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index a9e7eac51..74e8cc42f 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -72,7 +72,7 @@ def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog): # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1}) freqai.dk.use_DBSCAN_to_remove_outliers(predict=False) assert log_has_re( - "DBSCAN found eps of 2.36.", + "DBSCAN found eps of 1.75.", caplog, ) @@ -81,7 +81,7 @@ def test_compute_distances(mocker, freqai_conf): freqai = make_data_dictionary(mocker, freqai_conf) freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 1}) avg_mean_dist = freqai.dk.compute_distances() - assert round(avg_mean_dist, 2) == 2.54 + assert round(avg_mean_dist, 2) == 1.99 def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, caplog): @@ -89,7 +89,7 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1}) freqai.dk.use_SVM_to_remove_outliers(predict=False) assert log_has_re( - "SVM detected 8.09%", + "SVM detected 7.36%", caplog, ) @@ -128,7 +128,7 @@ def test_normalize_data(mocker, freqai_conf): freqai = make_data_dictionary(mocker, 
freqai_conf) data_dict = freqai.dk.data_dictionary freqai.dk.normalize_data(data_dict) - assert len(freqai.dk.data) == 56 + assert len(freqai.dk.data) == 32 def test_filter_features(mocker, freqai_conf): @@ -142,7 +142,7 @@ def test_filter_features(mocker, freqai_conf): training_filter=True, ) - assert len(filtered_df.columns) == 26 + assert len(filtered_df.columns) == 14 def test_make_train_test_datasets(mocker, freqai_conf): diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 2a7cfeb73..ac2d5446d 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -21,15 +21,40 @@ def is_arm() -> bool: 'LightGBMRegressor', 'XGBoostRegressor', 'CatboostRegressor', + 'ReinforcementLearner', + 'ReinforcementLearner_multiproc' ]) -def test_extract_data_and_train_model_Regressors(mocker, freqai_conf, model): +def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") + model_save_ext = 'joblib' freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_test_strat"}) + if 'ReinforcementLearner' in model: + model_save_ext = 'zip' + freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf["freqai"].update({"model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "verbose": 1 + }}) + freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) + freqai_conf["freqai"]["rl_config"] = { + "train_cycles": 1, + "thread_count": 2, + "max_trade_duration_candles": 300, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "max_training_drawdown_pct": 0.5, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02, + "win_reward_factor": 2 + }} + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -42,16 +67,19 @@ def test_extract_data_and_train_model_Regressors(mocker, freqai_conf, model): freqai.dd.pair_dict = MagicMock() - data_load_timerange = TimeRange.parse_timerange("20180110-20180130") - new_timerange = TimeRange.parse_timerange("20180120-20180130") + data_load_timerange = TimeRange.parse_timerange("20180125-20180130") + new_timerange = TimeRange.parse_timerange("20180127-20180130") freqai.extract_data_and_train_model( new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) - assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file() + assert Path(freqai.dk.data_path / + f"{freqai.dk.model_filename}_model.{model_save_ext}").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() - assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").is_file() + # if 'ReinforcementLearner' not in model: + # assert Path(freqai.dk.data_path / + # f"{freqai.dk.model_filename}_svm_model.joblib").is_file() shutil.rmtree(Path(freqai.dk.full_path)) @@ -91,7 +119,7 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model): assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() assert Path(freqai.dk.data_path / 
f"{freqai.dk.model_filename}_svm_model.joblib").is_file() - assert len(freqai.dk.data['training_features_list']) == 26 + assert len(freqai.dk.data['training_features_list']) == 14 shutil.rmtree(Path(freqai.dk.full_path)) From 3b97b3d5c8158905e81fddb2ab36306bccf07e70 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Sep 2022 00:56:51 +0200 Subject: [PATCH 074/421] fix mypy error for strategy --- .../RL/BaseReinforcementLearningModel.py | 9 +- freqtrade/freqai/freqai_interface.py | 2 + freqtrade/strategy/interface.py | 1 - tests/strategy/strats/freqai_rl_test_strat.py | 139 ++++++++++++++++++ 4 files changed, 146 insertions(+), 5 deletions(-) create mode 100644 tests/strategy/strats/freqai_rl_test_strat.py diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index f822208f8..a583fc9cd 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -155,12 +155,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): trade_duration = 0 for trade in open_trades: if trade.pair == pair: - # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) # FIXME: get_rate and trade_udration shouldn't work with backtesting, # we need to use candle dates and prices to compute that. - pytest.set_trace() - current_value = self.strategy.dp._exchange.get_rate( - pair, refresh=False, side="exit", is_short=trade.is_short) + if self.strategy.dp._exchange is None: + logger.error('No exchange available.') + else: + current_value = self.strategy.dp._exchange.get_rate( + pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() trade_duration = int((now - trade.open_date.timestamp()) / self.base_tf_seconds) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 7b35cd918..7550f1884 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -92,6 +92,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() + self.strategy: IStrategy = None def __getstate__(self): """ @@ -119,6 +120,7 @@ class IFreqaiModel(ABC): self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) self.dd.set_pair_dict_info(metadata) + self.strategy = strategy if self.live: self.inference_timer('start') diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 03ca4af70..9401ebebe 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -160,7 +160,6 @@ class IStrategy(ABC, HyperStrategyMixin): "already on disk." ) download_all_data_for_training(self.dp, self.config) - self.freqai.strategy = self else: # Gracious failures if freqAI is disabled but "start" is called. class DummyClass(): diff --git a/tests/strategy/strats/freqai_rl_test_strat.py b/tests/strategy/strats/freqai_rl_test_strat.py new file mode 100644 index 000000000..7b36dc6be --- /dev/null +++ b/tests/strategy/strats/freqai_rl_test_strat.py @@ -0,0 +1,139 @@ +import logging +from functools import reduce + +import pandas as pd +import talib.abstract as ta +from pandas import DataFrame + +from freqtrade.strategy import IStrategy, merge_informative_pair + + +logger = logging.getLogger(__name__) + + +class freqai_rl_test_strat(IStrategy): + """ + Test strategy - used for testing freqAI functionalities. + DO not use in production. 
+ """ + + minimal_roi = {"0": 0.1, "240": -1} + + plot_config = { + "main_plot": {}, + "subplots": { + "prediction": {"prediction": {"color": "blue"}}, + "target_roi": { + "target_roi": {"color": "brown"}, + }, + "do_predict": { + "do_predict": {"color": "brown"}, + }, + }, + } + + process_only_new_candles = True + stoploss = -0.05 + use_exit_signal = True + startup_candle_count: int = 30 + can_short = False + + def informative_pairs(self): + whitelist_pairs = self.dp.current_whitelist() + corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] + informative_pairs = [] + for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: + for pair in whitelist_pairs: + informative_pairs.append((pair, tf)) + for pair in corr_pairs: + if pair in whitelist_pairs: + continue # avoid duplication + informative_pairs.append((pair, tf)) + return informative_pairs + + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + # FIXME: add these outside the user strategy? + # The following columns are necessary for RL models. + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # For RL, there are no direct targets to set. This is filler (neutral) + # until the agent sends an action. 
+ df["&-action"] = 0 + + return df + + def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + + dataframe = self.freqai.start(dataframe, metadata, self) + + return dataframe + + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df From 025b98decd7ca0d5ca0713d9b989bf37e941e7ab Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Sep 2022 01:01:33 +0200 Subject: [PATCH 075/421] bring back doc sentence --- docs/freqai.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai.md b/docs/freqai.md index f9e7546c3..86cf6558d 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -123,7 +123,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `test_size` | Fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1.
| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`.
**Datatype:** Boolean.
| | **Model training parameters**
-| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model.
**Datatype:** Dictionary.
+| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, such as `PPO` from stable_baselines3, this dictionary can contain any parameter from that model.
**Datatype:** Dictionary.
| `n_estimators` | The number of boosted trees to fit in regression.
**Datatype:** Integer.
| `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float.
| `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. From d056d766ed02ed15ab206f8f5a4fcc20a465c847 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 17 Sep 2022 17:46:47 +0200 Subject: [PATCH 076/421] make tests pass --- tests/rpc/test_rpc_apiserver.py | 1 + tests/strategy/test_strategy_loading.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 5dfa77d8b..898ab4767 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1428,6 +1428,7 @@ def test_api_strategies(botclient): 'StrategyTestV2', 'StrategyTestV3', 'StrategyTestV3Futures', + 'freqai_rl_test_strat', 'freqai_test_classifier', 'freqai_test_multimodel_strat', 'freqai_test_strat' diff --git a/tests/strategy/test_strategy_loading.py b/tests/strategy/test_strategy_loading.py index bf81cd068..c728a81b0 100644 --- a/tests/strategy/test_strategy_loading.py +++ b/tests/strategy/test_strategy_loading.py @@ -34,7 +34,7 @@ def test_search_all_strategies_no_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver.search_all_objects(directory, enum_failed=False) assert isinstance(strategies, list) - assert len(strategies) == 9 + assert len(strategies) == 10 assert isinstance(strategies[0], dict) @@ -42,10 +42,10 @@ def test_search_all_strategies_with_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver.search_all_objects(directory, enum_failed=True) assert isinstance(strategies, list) - assert len(strategies) == 10 + assert len(strategies) == 11 # with enum_failed=True search_all_objects() shall find 2 good strategies # and 1 which fails to load - assert len([x for x in strategies if x['class'] is not None]) == 9 + assert len([x for x in strategies if x['class'] is not None]) == 10 assert len([x for x in strategies if x['class'] is None]) == 1 directory = Path(__file__).parent / "strats_nonexistingdir" From 7b1d409c9814ad7450c9cfe28ec31a732b86f233 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 17 Sep 2022 17:51:06 +0200 Subject: [PATCH 077/421] fix mypy/flake8 --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 6 +++--- freqtrade/freqai/freqai_interface.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a583fc9cd..69ae52f38 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -21,7 +21,7 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions from freqtrade.persistence import Trade -import pytest + logger = logging.getLogger(__name__) @@ -157,10 +157,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): if trade.pair == pair: # FIXME: get_rate and trade_udration shouldn't work with backtesting, # we need to use candle dates and prices to compute that. 
- if self.strategy.dp._exchange is None: + if self.strategy.dp._exchange is None: # type: ignore logger.error('No exchange available.') else: - current_value = self.strategy.dp._exchange.get_rate( + current_value = self.strategy.dp._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 7550f1884..7e952d981 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -92,7 +92,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() - self.strategy: IStrategy = None + self.strategy: Optional[IStrategy] = None def __getstate__(self): """ From eeebb78a5c772b0c3e569fd476587facb1f8a9dc Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 22 Sep 2022 21:16:21 +0200 Subject: [PATCH 078/421] skip darwin in RL tests, remove example scripts, improve doc --- docs/freqai.md | 126 +++++ .../RL/ReinforcementLearnerCustomAgent.py | 456 +++++++++--------- .../ReinforcementLearningExample4ac.py | 143 ------ .../ReinforcementLearningExample5ac.py | 147 ------ tests/freqai/test_freqai_interface.py | 8 + 5 files changed, 362 insertions(+), 518 deletions(-) delete mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py delete mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py diff --git a/docs/freqai.md b/docs/freqai.md index 028a7623e..a2058b0ed 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -805,3 +805,129 @@ Code review, software architecture brainstorming: Beta testing and bug reporting: @bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm, Juha Nykänen @suikula, Wagner Costa @wagnercosta + + +## Reinforcement Learning + +Setting up and running a Reinforcement Learning model is as quick and simple as running a Regressor. Users can start training and trading live from example files using: + +```bash +freqtrade trade --freqaimodel ReinforcementLearner --strategy ReinforcementLearningExample5ac --strategy-path freqtrade/freqai/example_strats --config config_examples/config_freqai-rl.example.json +``` + +As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: + +```python + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. 
+ """ + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + return 0. +``` + +After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: + +```python + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + ... + + if set_generalized_indicators: + # For RL, there are no direct targets to set. This sets the base action to neutral + # until the agent sends an action. + df["&-action"] = 0 + + return df + +``` + +and then the `&-action` will be used in `populate_entry/exit` functions: + +```python + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df +``` + +Users should be careful to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. + +### Using Tensorboard + +Reinforcement Learning models benefit from tracking training metrics. 
FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: + +```bash +pip3 install tensorboard +``` + +Next, the user can activate Tensorboard with the following command: + +```bash +cd freqtrade +tensorboard --logdir user_data/models/unique-id +``` + +where `unique-id` is the `identifier` set in the `freqai` configuration file. + +![tensorboard](assets/tensorboard.png) \ No newline at end of file diff --git a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py index 4ad95c214..31d21d459 100644 --- a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py +++ b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py @@ -1,262 +1,262 @@ -import logging -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Type, Union +# import logging +# from pathlib import Path +# from typing import Any, Dict, List, Optional, Tuple, Type, Union -import gym -import torch as th -from stable_baselines3 import DQN -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3.common.policies import BasePolicy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor -from stable_baselines3.common.type_aliases import GymEnv, Schedule -from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork -from torch import nn +# import gym +# import torch as th +# from stable_baselines3 import DQN +# from stable_baselines3.common.buffers import ReplayBuffer +# from stable_baselines3.common.policies import BasePolicy +# from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor +# from stable_baselines3.common.type_aliases import GymEnv, Schedule +# from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork +# from torch import nn -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +# from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +# from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -logger = logging.getLogger(__name__) +# logger = logging.getLogger(__name__) -class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): - """ - User can customize agent by defining the class and using it directly. - Here the example is "TDQN" +# class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): +# """ +# User can customize agent by defining the class and using it directly. +# Here the example is "TDQN" - Warning! - This is an advanced example of how a user may create and use a highly - customized model class (which can inherit from existing classes, - similar to how the example below inherits from DQN). - This file is for example purposes only, and should not be run. - """ +# Warning! +# This is an advanced example of how a user may create and use a highly +# customized model class (which can inherit from existing classes, +# similar to how the example below inherits from DQN). +# This file is for example purposes only, and should not be run. 
+# """ - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): +# def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - train_df = data_dictionary["train_features"] - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) +# train_df = data_dictionary["train_features"] +# total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) +# policy_kwargs = dict(activation_fn=th.nn.ReLU, +# net_arch=[256, 256, 128]) - # TDQN is a custom agent defined below - model = TDQN(self.policy_type, self.train_env, - tensorboard_log=str(Path(dk.data_path / "tensorboard")), - policy_kwargs=policy_kwargs, - **self.freqai_info['model_training_parameters'] - ) +# # TDQN is a custom agent defined below +# model = TDQN(self.policy_type, self.train_env, +# tensorboard_log=str(Path(dk.data_path / "tensorboard")), +# policy_kwargs=policy_kwargs, +# **self.freqai_info['model_training_parameters'] +# ) - model.learn( - total_timesteps=int(total_timesteps), - callback=self.eval_callback - ) +# model.learn( +# total_timesteps=int(total_timesteps), +# callback=self.eval_callback +# ) - if Path(dk.data_path / "best_model.zip").is_file(): - logger.info('Callback found a best model.') - best_model = self.MODELCLASS.load(dk.data_path / "best_model") - return best_model +# if Path(dk.data_path / "best_model.zip").is_file(): +# logger.info('Callback found a best model.') +# best_model = self.MODELCLASS.load(dk.data_path / "best_model") +# return best_model - logger.info('Couldnt find best model, using final model instead.') +# logger.info('Couldnt find best model, using final model instead.') - return model +# return model -# User creates their custom agent and networks as shown below +# # User creates their custom agent and networks as shown below -def create_mlp_( - input_dim: int, - output_dim: int, - net_arch: List[int], - activation_fn: Type[nn.Module] = nn.ReLU, - squash_output: bool = False, -) -> List[nn.Module]: - dropout = 0.2 - if len(net_arch) > 0: - number_of_neural = net_arch[0] +# def create_mlp_( +# input_dim: int, +# output_dim: int, +# net_arch: List[int], +# activation_fn: Type[nn.Module] = nn.ReLU, +# squash_output: bool = False, +# ) -> List[nn.Module]: +# dropout = 0.2 +# if len(net_arch) > 0: +# number_of_neural = net_arch[0] - modules = [ - nn.Linear(input_dim, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, output_dim) - ] - return modules +# modules = [ +# nn.Linear(input_dim, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, 
output_dim) +# ] +# return modules -class TDQNetwork(QNetwork): - def __init__(self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True - ): - super().__init__( - observation_space=observation_space, - action_space=action_space, - features_extractor=features_extractor, - features_dim=features_dim, - net_arch=net_arch, - activation_fn=activation_fn, - normalize_images=normalize_images - ) - action_dim = self.action_space.n - q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net).apply(self.init_weights) +# class TDQNetwork(QNetwork): +# def __init__(self, +# observation_space: gym.spaces.Space, +# action_space: gym.spaces.Space, +# features_extractor: nn.Module, +# features_dim: int, +# net_arch: Optional[List[int]] = None, +# activation_fn: Type[nn.Module] = nn.ReLU, +# normalize_images: bool = True +# ): +# super().__init__( +# observation_space=observation_space, +# action_space=action_space, +# features_extractor=features_extractor, +# features_dim=features_dim, +# net_arch=net_arch, +# activation_fn=activation_fn, +# normalize_images=normalize_images +# ) +# action_dim = self.action_space.n +# q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) +# self.q_net = nn.Sequential(*q_net).apply(self.init_weights) - def init_weights(self, m): - if type(m) == nn.Linear: - th.nn.init.kaiming_uniform_(m.weight) +# def init_weights(self, m): +# if type(m) == nn.Linear: +# th.nn.init.kaiming_uniform_(m.weight) -class TDQNPolicy(DQNPolicy): +# class TDQNPolicy(DQNPolicy): - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super().__init__( - observation_space=observation_space, - action_space=action_space, - lr_schedule=lr_schedule, - net_arch=net_arch, - activation_fn=activation_fn, - features_extractor_class=features_extractor_class, - features_extractor_kwargs=features_extractor_kwargs, - normalize_images=normalize_images, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs - ) +# def __init__( +# self, +# observation_space: gym.spaces.Space, +# action_space: gym.spaces.Space, +# lr_schedule: Schedule, +# net_arch: Optional[List[int]] = None, +# activation_fn: Type[nn.Module] = nn.ReLU, +# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, +# features_extractor_kwargs: Optional[Dict[str, Any]] = None, +# normalize_images: bool = True, +# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, +# optimizer_kwargs: Optional[Dict[str, Any]] = None, +# ): +# super().__init__( +# observation_space=observation_space, +# action_space=action_space, +# lr_schedule=lr_schedule, +# net_arch=net_arch, +# activation_fn=activation_fn, +# features_extractor_class=features_extractor_class, +# features_extractor_kwargs=features_extractor_kwargs, +# normalize_images=normalize_images, +# optimizer_class=optimizer_class, +# 
optimizer_kwargs=optimizer_kwargs +# ) - @staticmethod - def init_weights(module: nn.Module, gain: float = 1) -> None: - """ - Orthogonal initialization (used in PPO and A2C) - """ - if isinstance(module, (nn.Linear, nn.Conv2d)): - nn.init.kaiming_uniform_(module.weight) - if module.bias is not None: - module.bias.data.fill_(0.0) +# @staticmethod +# def init_weights(module: nn.Module, gain: float = 1) -> None: +# """ +# Orthogonal initialization (used in PPO and A2C) +# """ +# if isinstance(module, (nn.Linear, nn.Conv2d)): +# nn.init.kaiming_uniform_(module.weight) +# if module.bias is not None: +# module.bias.data.fill_(0.0) - def make_q_net(self) -> TDQNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return TDQNetwork(**net_args).to(self.device) +# def make_q_net(self) -> TDQNetwork: +# # Make sure we always have separate networks for features extractors etc +# net_args = self._update_features_extractor(self.net_args, features_extractor=None) +# return TDQNetwork(**net_args).to(self.device) -class TMultiInputPolicy(TDQNPolicy): - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super().__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) +# class TMultiInputPolicy(TDQNPolicy): +# def __init__( +# self, +# observation_space: gym.spaces.Space, +# action_space: gym.spaces.Space, +# lr_schedule: Schedule, +# net_arch: Optional[List[int]] = None, +# activation_fn: Type[nn.Module] = nn.ReLU, +# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, +# features_extractor_kwargs: Optional[Dict[str, Any]] = None, +# normalize_images: bool = True, +# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, +# optimizer_kwargs: Optional[Dict[str, Any]] = None, +# ): +# super().__init__( +# observation_space, +# action_space, +# lr_schedule, +# net_arch, +# activation_fn, +# features_extractor_class, +# features_extractor_kwargs, +# normalize_images, +# optimizer_class, +# optimizer_kwargs, +# ) -class TDQN(DQN): +# class TDQN(DQN): - policy_aliases: Dict[str, Type[BasePolicy]] = { - "MlpPolicy": MlpPolicy, - "CnnPolicy": CnnPolicy, - "TMultiInputPolicy": TMultiInputPolicy, - } +# policy_aliases: Dict[str, Type[BasePolicy]] = { +# "MlpPolicy": MlpPolicy, +# "CnnPolicy": CnnPolicy, +# "TMultiInputPolicy": TMultiInputPolicy, +# } - def __init__( - self, - policy: Union[str, Type[TDQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, # 1e6 - learning_starts: int = 50000, - batch_size: int = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - replay_buffer_class: Optional[ReplayBuffer] = None, - replay_buffer_kwargs: Optional[Dict[str, Any]] = None, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - 
exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 1, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): +# def __init__( +# self, +# policy: Union[str, Type[TDQNPolicy]], +# env: Union[GymEnv, str], +# learning_rate: Union[float, Schedule] = 1e-4, +# buffer_size: int = 1000000, # 1e6 +# learning_starts: int = 50000, +# batch_size: int = 32, +# tau: float = 1.0, +# gamma: float = 0.99, +# train_freq: Union[int, Tuple[int, str]] = 4, +# gradient_steps: int = 1, +# replay_buffer_class: Optional[ReplayBuffer] = None, +# replay_buffer_kwargs: Optional[Dict[str, Any]] = None, +# optimize_memory_usage: bool = False, +# target_update_interval: int = 10000, +# exploration_fraction: float = 0.1, +# exploration_initial_eps: float = 1.0, +# exploration_final_eps: float = 0.05, +# max_grad_norm: float = 10, +# tensorboard_log: Optional[str] = None, +# create_eval_env: bool = False, +# policy_kwargs: Optional[Dict[str, Any]] = None, +# verbose: int = 1, +# seed: Optional[int] = None, +# device: Union[th.device, str] = "auto", +# _init_setup_model: bool = True, +# ): - super().__init__( - policy=policy, - env=env, - learning_rate=learning_rate, - buffer_size=buffer_size, - learning_starts=learning_starts, - batch_size=batch_size, - tau=tau, - gamma=gamma, - train_freq=train_freq, - gradient_steps=gradient_steps, - replay_buffer_class=replay_buffer_class, # No action noise - replay_buffer_kwargs=replay_buffer_kwargs, - optimize_memory_usage=optimize_memory_usage, - target_update_interval=target_update_interval, - exploration_fraction=exploration_fraction, - exploration_initial_eps=exploration_initial_eps, - exploration_final_eps=exploration_final_eps, - max_grad_norm=max_grad_norm, - tensorboard_log=tensorboard_log, - create_eval_env=create_eval_env, - policy_kwargs=policy_kwargs, - verbose=verbose, - seed=seed, - device=device, - _init_setup_model=_init_setup_model - ) +# super().__init__( +# policy=policy, +# env=env, +# learning_rate=learning_rate, +# buffer_size=buffer_size, +# learning_starts=learning_starts, +# batch_size=batch_size, +# tau=tau, +# gamma=gamma, +# train_freq=train_freq, +# gradient_steps=gradient_steps, +# replay_buffer_class=replay_buffer_class, # No action noise +# replay_buffer_kwargs=replay_buffer_kwargs, +# optimize_memory_usage=optimize_memory_usage, +# target_update_interval=target_update_interval, +# exploration_fraction=exploration_fraction, +# exploration_initial_eps=exploration_initial_eps, +# exploration_final_eps=exploration_final_eps, +# max_grad_norm=max_grad_norm, +# tensorboard_log=tensorboard_log, +# create_eval_env=create_eval_env, +# policy_kwargs=policy_kwargs, +# verbose=verbose, +# seed=seed, +# device=device, +# _init_setup_model=_init_setup_model +# ) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py deleted file mode 100644 index d9932eea7..000000000 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py +++ /dev/null @@ -1,143 +0,0 @@ -import logging -from functools import reduce - -import pandas as pd -import talib.abstract as ta -from pandas import DataFrame - -from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, 
merge_informative_pair - - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningExample4ac(IStrategy): - """ - Test strategy - used for testing freqAI functionalities. - DO not use in production. - """ - - minimal_roi = {"0": 0.1, "240": -1} - - plot_config = { - "main_plot": {}, - "subplots": { - "prediction": {"prediction": {"color": "blue"}}, - "target_roi": { - "target_roi": {"color": "brown"}, - }, - "do_predict": { - "do_predict": {"color": "brown"}, - }, - }, - } - - process_only_new_candles = True - stoploss = -0.05 - use_exit_signal = True - startup_candle_count: int = 300 - can_short = True - - linear_roi_offset = DecimalParameter( - 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True - ) - max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) - - def informative_pairs(self): - whitelist_pairs = self.dp.current_whitelist() - corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] - informative_pairs = [] - for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: - for pair in whitelist_pairs: - informative_pairs.append((pair, tf)) - for pair in corr_pairs: - if pair in whitelist_pairs: - continue # avoid duplication - informative_pairs.append((pair, tf)) - return informative_pairs - - def populate_any_indicators( - self, pair, df, tf, informative=None, set_generalized_indicators=False - ): - - coin = pair.split('/')[0] - - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - - informative[f"%-{coin}pct-change"] = informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] - - # The following features are necessary for RL models - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] - - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) - - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) - - # Add generalized indicators here (because in live, it will call this - # function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - - # For RL, this is not a target, it is simply a filler until actions come out - # of the model. 
- # for Base4ActionEnv, 0 is netural (hold) - df["&-action"] = 0 - - return df - - def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - - dataframe = self.freqai.start(dataframe, metadata, self) - - return dataframe - - def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - - enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] - - if enter_long_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] - ] = (1, "long") - - enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] - - if enter_short_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] - ] = (1, "short") - - return df - - def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] - if exit_long_conditions: - df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit"] = 1 - - return df diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py deleted file mode 100644 index 2118e1221..000000000 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ /dev/null @@ -1,147 +0,0 @@ -import logging -from functools import reduce - -import pandas as pd -import talib.abstract as ta -from pandas import DataFrame - -from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair - - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningExample5ac(IStrategy): - """ - Test strategy - used for testing freqAI functionalities. - DO not use in production. - """ - - minimal_roi = {"0": 0.1, "240": -1} - - plot_config = { - "main_plot": {}, - "subplots": { - "prediction": {"prediction": {"color": "blue"}}, - "target_roi": { - "target_roi": {"color": "brown"}, - }, - "do_predict": { - "do_predict": {"color": "brown"}, - }, - }, - } - - process_only_new_candles = True - stoploss = -0.05 - use_exit_signal = True - startup_candle_count: int = 300 - can_short = True - - linear_roi_offset = DecimalParameter( - 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True - ) - max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) - - def informative_pairs(self): - whitelist_pairs = self.dp.current_whitelist() - corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] - informative_pairs = [] - for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: - for pair in whitelist_pairs: - informative_pairs.append((pair, tf)) - for pair in corr_pairs: - if pair in whitelist_pairs: - continue # avoid duplication - informative_pairs.append((pair, tf)) - return informative_pairs - - def populate_any_indicators( - self, pair, df, tf, informative=None, set_generalized_indicators=False - ): - - coin = pair.split('/')[0] - - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - - informative[f"%-{coin}pct-change"] = 
informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] - - # FIXME: add these outside the user strategy? - # The following columns are necessary for RL models. - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] - - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) - - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) - - # Add generalized indicators here (because in live, it will call this - # function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - - # For RL, there are no direct targets to set. This is filler (neutral) - # until the agent sends an action. - df["&-action"] = 0 - - return df - - def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - - dataframe = self.freqai.start(dataframe, metadata, self) - - return dataframe - - def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - - enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] - - if enter_long_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] - ] = (1, "long") - - enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] - - if enter_short_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] - ] = (1, "short") - - return df - - def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] - if exit_long_conditions: - df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 - - exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] - if exit_short_conditions: - df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 - - return df diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 52577f2d3..a50e7e04c 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -18,6 +18,11 @@ def is_arm() -> bool: return "arm" in machine or "aarch64" in machine +def is_mac() -> bool: + machine = platform.system() + return "Darwin" in machine + + @pytest.mark.parametrize('model', [ 'LightGBMRegressor', 'XGBoostRegressor', @@ -29,6 +34,9 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") + if is_mac(): + pytest.skip("Reinforcement learning module not available on intel based Mac OS") + model_save_ext = 'joblib' freqai_conf.update({"freqaimodel": model}) 
freqai_conf.update({"timerange": "20180110-20180130"}) From f6e9753c990d4a697e88d37e43ed8d963c301767 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 22 Sep 2022 21:18:09 +0200 Subject: [PATCH 079/421] show advanced users how they can customize agent indepth` --- docs/freqai.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/freqai.md b/docs/freqai.md index a2058b0ed..938fb70f4 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -913,6 +913,10 @@ and then the `&-action` will be used in `populate_entry/exit` functions: Users should be careful to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. +### Creating a custom agent + +Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py` + ### Using Tensorboard Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: From 7295ba0fb2c408c3ce5ec413edae75b93805d829 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 22 Sep 2022 23:42:33 +0200 Subject: [PATCH 080/421] add test for Base4ActionEnv --- .../RL/BaseReinforcementLearningModel.py | 7 - .../RL/ReinforcementLearnerCustomAgent.py | 262 ------------------ tests/freqai/test_freqai_interface.py | 9 +- 3 files changed, 5 insertions(+), 273 deletions(-) delete mode 100644 freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 69ae52f38..d10bf4dc3 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -319,13 +319,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return 0. - # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. - # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor - # all the other existing fit() functions to include dk argument. For now we instantiate and - # leave it. 
- # def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: - # return - def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, diff --git a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py deleted file mode 100644 index 31d21d459..000000000 --- a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py +++ /dev/null @@ -1,262 +0,0 @@ -# import logging -# from pathlib import Path -# from typing import Any, Dict, List, Optional, Tuple, Type, Union - -# import gym -# import torch as th -# from stable_baselines3 import DQN -# from stable_baselines3.common.buffers import ReplayBuffer -# from stable_baselines3.common.policies import BasePolicy -# from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor -# from stable_baselines3.common.type_aliases import GymEnv, Schedule -# from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork -# from torch import nn - -# from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -# from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel - - -# logger = logging.getLogger(__name__) - - -# class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): -# """ -# User can customize agent by defining the class and using it directly. -# Here the example is "TDQN" - -# Warning! -# This is an advanced example of how a user may create and use a highly -# customized model class (which can inherit from existing classes, -# similar to how the example below inherits from DQN). -# This file is for example purposes only, and should not be run. -# """ - -# def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - -# train_df = data_dictionary["train_features"] -# total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - -# policy_kwargs = dict(activation_fn=th.nn.ReLU, -# net_arch=[256, 256, 128]) - -# # TDQN is a custom agent defined below -# model = TDQN(self.policy_type, self.train_env, -# tensorboard_log=str(Path(dk.data_path / "tensorboard")), -# policy_kwargs=policy_kwargs, -# **self.freqai_info['model_training_parameters'] -# ) - -# model.learn( -# total_timesteps=int(total_timesteps), -# callback=self.eval_callback -# ) - -# if Path(dk.data_path / "best_model.zip").is_file(): -# logger.info('Callback found a best model.') -# best_model = self.MODELCLASS.load(dk.data_path / "best_model") -# return best_model - -# logger.info('Couldnt find best model, using final model instead.') - -# return model - -# # User creates their custom agent and networks as shown below - - -# def create_mlp_( -# input_dim: int, -# output_dim: int, -# net_arch: List[int], -# activation_fn: Type[nn.Module] = nn.ReLU, -# squash_output: bool = False, -# ) -> List[nn.Module]: -# dropout = 0.2 -# if len(net_arch) > 0: -# number_of_neural = net_arch[0] - -# modules = [ -# nn.Linear(input_dim, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, output_dim) -# ] 
-# return modules - - -# class TDQNetwork(QNetwork): -# def __init__(self, -# observation_space: gym.spaces.Space, -# action_space: gym.spaces.Space, -# features_extractor: nn.Module, -# features_dim: int, -# net_arch: Optional[List[int]] = None, -# activation_fn: Type[nn.Module] = nn.ReLU, -# normalize_images: bool = True -# ): -# super().__init__( -# observation_space=observation_space, -# action_space=action_space, -# features_extractor=features_extractor, -# features_dim=features_dim, -# net_arch=net_arch, -# activation_fn=activation_fn, -# normalize_images=normalize_images -# ) -# action_dim = self.action_space.n -# q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) -# self.q_net = nn.Sequential(*q_net).apply(self.init_weights) - -# def init_weights(self, m): -# if type(m) == nn.Linear: -# th.nn.init.kaiming_uniform_(m.weight) - - -# class TDQNPolicy(DQNPolicy): - -# def __init__( -# self, -# observation_space: gym.spaces.Space, -# action_space: gym.spaces.Space, -# lr_schedule: Schedule, -# net_arch: Optional[List[int]] = None, -# activation_fn: Type[nn.Module] = nn.ReLU, -# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, -# features_extractor_kwargs: Optional[Dict[str, Any]] = None, -# normalize_images: bool = True, -# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, -# optimizer_kwargs: Optional[Dict[str, Any]] = None, -# ): -# super().__init__( -# observation_space=observation_space, -# action_space=action_space, -# lr_schedule=lr_schedule, -# net_arch=net_arch, -# activation_fn=activation_fn, -# features_extractor_class=features_extractor_class, -# features_extractor_kwargs=features_extractor_kwargs, -# normalize_images=normalize_images, -# optimizer_class=optimizer_class, -# optimizer_kwargs=optimizer_kwargs -# ) - -# @staticmethod -# def init_weights(module: nn.Module, gain: float = 1) -> None: -# """ -# Orthogonal initialization (used in PPO and A2C) -# """ -# if isinstance(module, (nn.Linear, nn.Conv2d)): -# nn.init.kaiming_uniform_(module.weight) -# if module.bias is not None: -# module.bias.data.fill_(0.0) - -# def make_q_net(self) -> TDQNetwork: -# # Make sure we always have separate networks for features extractors etc -# net_args = self._update_features_extractor(self.net_args, features_extractor=None) -# return TDQNetwork(**net_args).to(self.device) - - -# class TMultiInputPolicy(TDQNPolicy): -# def __init__( -# self, -# observation_space: gym.spaces.Space, -# action_space: gym.spaces.Space, -# lr_schedule: Schedule, -# net_arch: Optional[List[int]] = None, -# activation_fn: Type[nn.Module] = nn.ReLU, -# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, -# features_extractor_kwargs: Optional[Dict[str, Any]] = None, -# normalize_images: bool = True, -# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, -# optimizer_kwargs: Optional[Dict[str, Any]] = None, -# ): -# super().__init__( -# observation_space, -# action_space, -# lr_schedule, -# net_arch, -# activation_fn, -# features_extractor_class, -# features_extractor_kwargs, -# normalize_images, -# optimizer_class, -# optimizer_kwargs, -# ) - - -# class TDQN(DQN): - -# policy_aliases: Dict[str, Type[BasePolicy]] = { -# "MlpPolicy": MlpPolicy, -# "CnnPolicy": CnnPolicy, -# "TMultiInputPolicy": TMultiInputPolicy, -# } - -# def __init__( -# self, -# policy: Union[str, Type[TDQNPolicy]], -# env: Union[GymEnv, str], -# learning_rate: Union[float, Schedule] = 1e-4, -# buffer_size: int = 1000000, # 1e6 -# learning_starts: int = 
50000, -# batch_size: int = 32, -# tau: float = 1.0, -# gamma: float = 0.99, -# train_freq: Union[int, Tuple[int, str]] = 4, -# gradient_steps: int = 1, -# replay_buffer_class: Optional[ReplayBuffer] = None, -# replay_buffer_kwargs: Optional[Dict[str, Any]] = None, -# optimize_memory_usage: bool = False, -# target_update_interval: int = 10000, -# exploration_fraction: float = 0.1, -# exploration_initial_eps: float = 1.0, -# exploration_final_eps: float = 0.05, -# max_grad_norm: float = 10, -# tensorboard_log: Optional[str] = None, -# create_eval_env: bool = False, -# policy_kwargs: Optional[Dict[str, Any]] = None, -# verbose: int = 1, -# seed: Optional[int] = None, -# device: Union[th.device, str] = "auto", -# _init_setup_model: bool = True, -# ): - -# super().__init__( -# policy=policy, -# env=env, -# learning_rate=learning_rate, -# buffer_size=buffer_size, -# learning_starts=learning_starts, -# batch_size=batch_size, -# tau=tau, -# gamma=gamma, -# train_freq=train_freq, -# gradient_steps=gradient_steps, -# replay_buffer_class=replay_buffer_class, # No action noise -# replay_buffer_kwargs=replay_buffer_kwargs, -# optimize_memory_usage=optimize_memory_usage, -# target_update_interval=target_update_interval, -# exploration_fraction=exploration_fraction, -# exploration_initial_eps=exploration_initial_eps, -# exploration_final_eps=exploration_final_eps, -# max_grad_norm=max_grad_norm, -# tensorboard_log=tensorboard_log, -# create_eval_env=create_eval_env, -# policy_kwargs=policy_kwargs, -# verbose=verbose, -# seed=seed, -# device=device, -# _init_setup_model=_init_setup_model -# ) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index a50e7e04c..252b8fc37 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -28,7 +28,8 @@ def is_mac() -> bool: 'XGBoostRegressor', 'CatboostRegressor', 'ReinforcementLearner', - 'ReinforcementLearner_multiproc' + 'ReinforcementLearner_multiproc', + 'ReinforcementLearner_test_4ac' ]) def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if is_arm() and model == 'CatboostRegressor': @@ -64,6 +65,9 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): "win_reward_factor": 2 }} + if 'test_4ac' in model: + freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -86,9 +90,6 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): f"{freqai.dk.model_filename}_model.{model_save_ext}").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() - # if 'ReinforcementLearner' not in model: - # assert Path(freqai.dk.data_path / - # f"{freqai.dk.model_filename}_svm_model.joblib").is_file() shutil.rmtree(Path(freqai.dk.full_path)) From 1c56fa034f908ae005e0167830e18ef54667f1a4 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 09:19:16 +0200 Subject: [PATCH 081/421] add test_models folder --- .../ReinforcementLearner_test_4ac.py | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 tests/freqai/test_models/ReinforcementLearner_test_4ac.py diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py 
b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py new file mode 100644 index 000000000..9a8f800bd --- /dev/null +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -0,0 +1,104 @@ +import logging +from pathlib import Path +from typing import Any, Dict + +import numpy as np +import torch as th + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[128, 128]) + + if dk.pair not in self.dd.model_dictionary or not self.continual_learning: + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path( + dk.full_path / "tensorboard" / dk.pair.split('/')[0]), + **self.freqai_info['model_training_parameters'] + ) + else: + logger.info('Continual training activated - starting training from previously ' + 'trained agent.') + model = self.dd.model_dictionary[dk.pair] + model.set_env(self.train_env) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Couldnt find best model, using final model instead.') + + return model + + class MyRLEnv(Base4ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. + """ + + def calculate_reward(self, action): + + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral): + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + + # discourage sitting in position + if (self._position in (Positions.Short, Positions.Long) and + action == Actions.Neutral.value): + return -1 * trade_duration / max_trade_duration + + # close long + if action == Actions.Exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + # close short + if action == Actions.Exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + return 0. 
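The test model above drives a four-action environment, so any strategy consuming its predictions has to translate the agent's `&-action` integers into freqtrade entry/exit signals. The sketch below is adapted from the deleted ReinforcementLearningExample4ac strategy earlier in this series and is not part of the patch itself; it assumes the Base4ActionRLEnv mapping (0 = neutral, 1 = exit, 2 = enter long, 3 = enter short), the class name is hypothetical, and indicator population is omitted for brevity.

# Hedged sketch (not part of the patch series): wiring Base4ActionRLEnv output
# into entry/exit columns, mirroring the deleted ReinforcementLearningExample4ac
# strategy. Action mapping assumed: 0=neutral, 1=exit, 2=enter long, 3=enter short.
from functools import reduce

from pandas import DataFrame

from freqtrade.strategy import IStrategy


class MyFourActionStrategy(IStrategy):  # hypothetical name
    minimal_roi = {"0": 0.1}
    stoploss = -0.05
    process_only_new_candles = True
    can_short = True

    # populate_indicators()/populate_any_indicators() omitted; they would call
    # self.freqai.start() exactly as in the example strategies shown above.

    def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        enter_long = [df["do_predict"] == 1, df["&-action"] == 2]
        df.loc[reduce(lambda x, y: x & y, enter_long),
               ["enter_long", "enter_tag"]] = (1, "long")

        enter_short = [df["do_predict"] == 1, df["&-action"] == 3]
        df.loc[reduce(lambda x, y: x & y, enter_short),
               ["enter_short", "enter_tag"]] = (1, "short")
        return df

    def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        # the deleted 4-action example wrote to a bare "exit" column; "exit_long"
        # is assumed here to match freqtrade's standard exit-signal naming
        exit_conds = [df["do_predict"] == 1, df["&-action"] == 1]
        df.loc[reduce(lambda x, y: x & y, exit_conds), "exit_long"] = 1
        return df

As in the example strategies, every signal is gated on `do_predict == 1` so that candles flagged as outliers by FreqAI are ignored.
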
From f5cd8f62c6d406372f13bd1626dc586e11919953 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 23 Sep 2022 10:24:39 +0200 Subject: [PATCH 082/421] Remove unused code from BaseEnv --- freqtrade/freqai/RL/BaseEnvironment.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index bb43f5300..200b7d138 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -195,8 +195,6 @@ class BaseEnvironment(gym.Env): be inherited/edited by the user made ReinforcementLearner file. """ - return 0. - def _update_unrealized_total_profit(self): """ Update the unrealized total profit incase of episode end. @@ -250,21 +248,8 @@ class BaseEnvironment(gym.Env): return 0 - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - def update_portfolio_log_returns(self, action): self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) def current_price(self) -> float: return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward From f7dd3045f7287f3480f4452b4d969ac734f43080 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 23 Sep 2022 10:30:52 +0200 Subject: [PATCH 083/421] Parameterize backtesting test --- tests/freqai/test_freqai_interface.py | 47 ++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 252b8fc37..592499a34 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -173,9 +173,54 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): shutil.rmtree(Path(freqai.dk.full_path)) +@pytest.mark.parametrize('model', [ + 'LightGBMRegressor', + 'XGBoostRegressor', + 'CatboostRegressor', + 'ReinforcementLearner', + 'ReinforcementLearner_multiproc', + 'ReinforcementLearner_test_4ac' + ]) def test_start_backtesting(mocker, freqai_conf): - freqai_conf.update({"timerange": "20180120-20180130"}) freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) + + if is_arm() and model == 'CatboostRegressor': + pytest.skip("CatBoost is not supported on ARM") + + if is_mac(): + pytest.skip("Reinforcement learning module not available on intel based Mac OS") + + model_save_ext = 'joblib' + freqai_conf.update({"freqaimodel": model}) + freqai_conf.update({"timerange": "20180110-20180130"}) + freqai_conf.update({"strategy": "freqai_test_strat"}) + + if 'ReinforcementLearner' in model: + model_save_ext = 'zip' + freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf["freqai"].update({"model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "verbose": 1 + }}) + freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) + freqai_conf["freqai"]["rl_config"] = { + "train_cycles": 1, + "thread_count": 2, + "max_trade_duration_candles": 300, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "max_training_drawdown_pct": 0.5, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02, + "win_reward_factor": 2 + }} + + if 'test_4ac' in model: + freqai_conf["freqaimodel_path"] = 
str(Path(__file__).parents[1] / "freqai" / "test_models") + + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) From 95121550efd6417583f6bfa2ab251c564e22d5d8 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 23 Sep 2022 10:37:34 +0200 Subject: [PATCH 084/421] Remove unnecessary models, add model arg --- tests/freqai/test_freqai_interface.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 592499a34..ac8fd2b42 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -177,11 +177,9 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): 'LightGBMRegressor', 'XGBoostRegressor', 'CatboostRegressor', - 'ReinforcementLearner', - 'ReinforcementLearner_multiproc', - 'ReinforcementLearner_test_4ac' + 'ReinforcementLearner' ]) -def test_start_backtesting(mocker, freqai_conf): +def test_start_backtesting(mocker, freqai_conf, model): freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) if is_arm() and model == 'CatboostRegressor': From 9c361f442262007ef77e2f899224eca1874ad298 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 18:04:43 +0200 Subject: [PATCH 085/421] increase test coverage for RL and FreqAI --- config_examples/config_freqai-rl.example.json | 109 ------------------ freqtrade/freqai/data_drawer.py | 19 --- tests/freqai/test_freqai_interface.py | 79 ++++++++++--- 3 files changed, 61 insertions(+), 146 deletions(-) delete mode 100644 config_examples/config_freqai-rl.example.json diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json deleted file mode 100644 index 9dfea932d..000000000 --- a/config_examples/config_freqai-rl.example.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "trading_mode": "futures", - "new_pairs_days": 30, - "margin_mode": "isolated", - "max_open_trades": 8, - "stake_currency": "USDT", - "stake_amount": 1000, - "tradable_balance_ratio": 1, - "fiat_display_currency": "USD", - "dry_run": true, - "timeframe": "5m", - "dataformat_ohlcv": "json", - "dry_run_wallet": 12000, - "cancel_open_orders_on_exit": true, - "unfilledtimeout": { - "entry": 10, - "exit": 30 - }, - "exchange": { - "name": "binance", - "key": "", - "secret": "", - "ccxt_config": { - "enableRateLimit": true - }, - "ccxt_async_config": { - "enableRateLimit": true, - "rateLimit": 200 - }, - "pair_whitelist": [ - "1INCH/USDT", - "AAVE/USDT" - ], - "pair_blacklist": [] - }, - "entry_pricing": { - "price_side": "same", - "use_order_book": true, - "order_book_top": 1, - "price_last_balance": 0.0, - "check_depth_of_market": { - "enabled": false, - "bids_to_ask_delta": 1 - } - }, - "exit_pricing": { - "price_side": "other", - "use_order_book": true, - "order_book_top": 1 - }, - "pairlists": [ - { - "method": "StaticPairList" - } - ], - "freqai": { - "enabled": true, - "model_save_type": "stable_baselines", - "conv_width": 4, - "purge_old_models": true, - "limit_ram_usage": false, - "train_period_days": 5, - "backtest_period_days": 2, - "identifier": "unique-id", - "continual_learning": false, - "data_kitchen_thread_count": 2, - "feature_parameters": { - "include_corr_pairlist": [ - "BTC/USDT", - "ETH/USDT" - ], - "include_timeframes": [ - "5m", - "30m" - ], - "indicator_max_period_candles": 20, - "indicator_periods_candles": [14] - }, - 
"data_split_parameters": { - "test_size": 0.5, - "random_state": 1, - "shuffle": false - }, - "model_training_parameters": { - "learning_rate": 0.00025, - "gamma": 0.9, - "verbose": 1 - }, - "rl_config": { - "train_cycles": 6, - "thread_count": 4, - "max_trade_duration_candles": 300, - "model_type": "PPO", - "policy_type": "MlpPolicy", - "max_training_drawdown_pct": 0.5, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.02, - "win_reward_factor": 2 - } - } - }, - "bot_name": "RL_test", - "force_entry_enable": true, - "initial_state": "running", - "internals": { - "process_throttle_secs": 5 - } -} diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index b58bed9ba..03840317f 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -602,22 +602,3 @@ class FreqaiDataDrawer: ) return corr_dataframes, base_dataframes - - # to be used if we want to send predictions directly to the follower instead of forcing - # follower to load models and inference - # def save_model_return_values_to_disk(self) -> None: - # with open(self.full_path / str('model_return_values.json'), "w") as fp: - # json.dump(self.model_return_values, fp, default=self.np_encoder) - - # def load_model_return_values_from_disk(self, dk: FreqaiDataKitchen) -> FreqaiDataKitchen: - # exists = Path(self.full_path / str('model_return_values.json')).resolve().exists() - # if exists: - # with open(self.full_path / str('model_return_values.json'), "r") as fp: - # self.model_return_values = json.load(fp) - # elif not self.follow_mode: - # logger.info("Could not find existing datadrawer, starting from scratch") - # else: - # logger.warning(f'Follower could not find pair_dictionary at {self.full_path} ' - # 'sending null values back to strategy') - - # return exists, dk diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index ac8fd2b42..f0af90f18 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -4,13 +4,15 @@ from pathlib import Path from unittest.mock import MagicMock import pytest - +from freqtrade.enums import RunMode from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import get_patched_exchange, log_has_re from tests.freqai.conftest import get_patched_freqai_strategy +from freqtrade.persistence import Trade +from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange def is_arm() -> bool: @@ -173,29 +175,34 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): shutil.rmtree(Path(freqai.dk.full_path)) -@pytest.mark.parametrize('model', [ - 'LightGBMRegressor', - 'XGBoostRegressor', - 'CatboostRegressor', - 'ReinforcementLearner' - ]) -def test_start_backtesting(mocker, freqai_conf, model): +@pytest.mark.parametrize( + "model, num_files, strat", + [ + ("LightGBMRegressor", 6, "freqai_test_strat"), + ("XGBoostRegressor", 6, "freqai_test_strat"), + ("CatboostRegressor", 6, "freqai_test_strat"), + ("ReinforcementLearner", 7, "freqai_rl_test_strat"), + ("XGBoostClassifier", 6, "freqai_test_classifier"), + ("LightGBMClassifier", 6, "freqai_test_classifier"), + ("CatboostClassifier", 6, "freqai_test_classifier") + ], + ) +def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): freqai_conf.get("freqai", 
{}).update({"save_backtest_models": True}) - - if is_arm() and model == 'CatboostRegressor': + freqai_conf['runmode'] = RunMode.BACKTEST + Trade.use_db = False + if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") if is_mac(): pytest.skip("Reinforcement learning module not available on intel based Mac OS") - model_save_ext = 'joblib' freqai_conf.update({"freqaimodel": model}) - freqai_conf.update({"timerange": "20180110-20180130"}) - freqai_conf.update({"strategy": "freqai_test_strat"}) + freqai_conf.update({"timerange": "20180120-20180130"}) + freqai_conf.update({"strategy": strat}) if 'ReinforcementLearner' in model: - model_save_ext = 'zip' - freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf["freqai"].update({"model_training_parameters": { "learning_rate": 0.00025, "gamma": 0.9, @@ -217,8 +224,7 @@ def test_start_backtesting(mocker, freqai_conf, model): if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") - - + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -237,7 +243,7 @@ def test_start_backtesting(mocker, freqai_conf, model): freqai.start_backtesting(df, metadata, freqai.dk) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] - assert len(model_folders) == 6 + assert len(model_folders) == num_files shutil.rmtree(Path(freqai.dk.full_path)) @@ -455,3 +461,40 @@ def test_freqai_informative_pairs(mocker, freqai_conf, timeframes, corr_pairs): pairs_b = strategy.gather_informative_pairs() # we expect unique pairs * timeframes assert len(pairs_b) == len(set(pairlist + corr_pairs)) * len(timeframes) + + +def test_start_set_train_queue(mocker, freqai_conf, caplog): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + pairlist = PairListManager(exchange, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange, pairlist) + strategy.freqai_info = freqai_conf.get("freqai", {}) + freqai = strategy.freqai + freqai.live = False + + freqai.train_queue = freqai._set_train_queue() + + assert log_has_re( + "Set fresh train queue from whitelist.", + caplog, + ) + + +def test_get_required_data_timerange(mocker, freqai_conf): + time_range = get_required_data_timerange(freqai_conf) + assert (time_range.stopts - time_range.startts) == 177300 + + +def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + pairlist = PairListManager(exchange, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange, pairlist) + freqai_conf['pairs'] = freqai_conf['exchange']['pair_whitelist'] + freqai_conf['datadir'] = Path(tmpdir) + download_all_data_for_training(strategy.dp, freqai_conf) + + assert log_has_re( + "Downloading", + caplog, + ) From 77c360b264c9dee489081c2761cc3be4ba0b01d1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 19:17:27 +0200 Subject: [PATCH 086/421] improve typing, improve docstrings, ensure global tests pass --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 13 +++- freqtrade/freqai/RL/Base5ActionRLEnv.py | 11 +++ freqtrade/freqai/RL/BaseEnvironment.py | 22 ++++-- .../RL/BaseReinforcementLearningModel.py | 75 +++++++++++++------ .../prediction_models/ReinforcementLearner.py | 20 ++++- .../ReinforcementLearner_multiproc.py | 
19 +++-- tests/freqai/test_freqai_interface.py | 4 +- 7 files changed, 124 insertions(+), 40 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index bd5785b85..b4fe78b71 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -25,6 +25,17 @@ class Base4ActionRLEnv(BaseEnvironment): self.action_space = spaces.Discrete(len(Actions)) def step(self, action: int): + """ + Logic for a single step (incrementing one candle in time) + by the agent + :param: action: int = the action type that the agent plans + to take for the current step. + :returns: + observation = current state of environment + step_reward = the reward from `calculate_reward()` + _done = if the agent "died" or if the candles finished + info = dict passed back to openai gym lib + """ self._done = False self._current_tick += 1 @@ -92,7 +103,6 @@ class Base4ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info def is_tradesignal(self, action: int): - # trade signal """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short @@ -107,7 +117,6 @@ class Base4ActionRLEnv(BaseEnvironment): (action == Actions.Long_enter.value and self._position == Positions.Short)) def _is_valid(self, action: int): - # trade signal """ Determine if the signal is valid. e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index e0a38f9d1..80543bf72 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -60,6 +60,17 @@ class Base5ActionRLEnv(BaseEnvironment): return self._get_observation() def step(self, action: int): + """ + Logic for a single step (incrementing one candle in time) + by the agent + :param: action: int = the action type that the agent plans + to take for the current step. + :returns: + observation = current state of environment + step_reward = the reward from `calculate_reward()` + _done = if the agent "died" or if the candles finished + info = dict passed back to openai gym lib + """ self._done = False self._current_tick += 1 diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 200b7d138..6474483c6 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -43,6 +43,10 @@ class BaseEnvironment(gym.Env): def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): + """ + Resets the environment when the agent fails (in our case, if the drawdown + exceeds the user set max_training_drawdown_pct) + """ self.df = df self.signal_features = self.df self.prices = prices @@ -133,13 +137,18 @@ class BaseEnvironment(gym.Env): return features_and_state def get_trade_duration(self): + """ + Get the trade duration if the agent is in a trade + """ if self._last_trade_tick is None: return 0 else: return self._current_tick - self._last_trade_tick def get_unrealized_profit(self): - + """ + Get the unrealized profit if the agent is in a trade + """ if self._last_trade_tick is None: return 0. @@ -158,7 +167,6 @@ class BaseEnvironment(gym.Env): @abstractmethod def is_tradesignal(self, action: int): - # trade signal """ Determine if the signal is a trade signal. 
This is unique to the actions in the environment, and therefore must be @@ -167,7 +175,6 @@ class BaseEnvironment(gym.Env): return def _is_valid(self, action: int): - # trade signal """ Determine if the signal is valid.This is unique to the actions in the environment, and therefore must be @@ -191,8 +198,13 @@ class BaseEnvironment(gym.Env): @abstractmethod def calculate_reward(self, action): """ - Reward is created by BaseReinforcementLearningModel and can - be inherited/edited by the user made ReinforcementLearner file. + An example reward function. This is the one function that users will likely + wish to inject their own creativity into. + :params: + action: int = The action made by the agent for the current candle. + :returns: + float = the reward to give to the agent for current step (used for optimization + of weights in NN) """ def _update_unrealized_total_profit(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index d10bf4dc3..c82fd1ea9 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -2,7 +2,7 @@ import logging from abc import abstractmethod from datetime import datetime, timezone from pathlib import Path -from typing import Any, Callable, Dict, Tuple +from typing import Any, Callable, Dict, Tuple, Type, Union import gym import numpy as np @@ -19,8 +19,9 @@ from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv -from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions +from freqtrade.freqai.RL.BaseEnvironment import Positions from freqtrade.persistence import Trade +from stable_baselines3.common.vec_env import SubprocVecEnv logger = logging.getLogger(__name__) @@ -33,15 +34,15 @@ SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO'] class BaseReinforcementLearningModel(IFreqaiModel): """ - User created Reinforcement Learning Model prediction model. + User created Reinforcement Learning Model prediction class """ def __init__(self, **kwargs): super().__init__(config=kwargs['config']) th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] - self.train_env: BaseEnvironment = None - self.eval_env: BaseEnvironment = None + self.train_env: Union[SubprocVecEnv, gym.Env] = None + self.eval_env: Union[SubprocVecEnv, gym.Env] = None self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] @@ -126,6 +127,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk: FreqaiDataKitchen): """ User can override this if they are using a custom MyRLEnv + :params: + data_dictionary: dict = common data dictionary containing train and test + features/labels/weights. 
+ prices_train/test: DataFrame = dataframe comprised of the prices to be used in the + environment during training + or testing + dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -148,15 +156,24 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ return - def get_state_info(self, pair: str): + def get_state_info(self, pair: str) -> Tuple[float, float, int]: + """ + State info during dry/live/backtesting which is fed back + into the model. + :param: + pair: str = COIN/STAKE to get the environment information for + :returns: + market_side: float = representing short, long, or neutral for + pair + trade_duration: int = the number of candles that the trade has + been open for + """ open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 current_profit: float = 0 trade_duration = 0 for trade in open_trades: if trade.pair == pair: - # FIXME: get_rate and trade_udration shouldn't work with backtesting, - # we need to use candle dates and prices to compute that. if self.strategy.dp._exchange is None: # type: ignore logger.error('No exchange available.') else: @@ -172,11 +189,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): market_side = 0 current_profit = (openrate - current_value) / openrate - # total_profit = 0 - # closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) - # for trade in closed_trades: - # total_profit += trade.close_profit - return market_side, current_profit, int(trade_duration) def predict( @@ -209,7 +221,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): def rl_model_predict(self, dataframe: DataFrame, dk: FreqaiDataKitchen, model: Any) -> DataFrame: - + """ + A helper function to make predictions in the Reinforcement learning module. + :params: + dataframe: DataFrame = the dataframe of features to make the predictions on + dk: FreqaiDatakitchen = data kitchen for the current pair + model: Any = the trained model used to inference the features. + """ output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): @@ -274,26 +292,37 @@ class BaseReinforcementLearningModel(IFreqaiModel): sets a custom reward based on profit and trade duration. """ - def calculate_reward(self, action): - + def calculate_reward(self, action: int) -> float: + """ + An example reward function. This is the one function that users will likely + wish to inject their own creativity into. + :params: + action: int = The action made by the agent for the current candle. + :returns: + float = the reward to give to the agent for current step (used for optimization + of weights in NN) + """ # first, penalize if the action is not valid if not self._is_valid(action): return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) - factor = 100 + factor = 100. 
# reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: + if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral): return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + if self._last_trade_tick: + trade_duration = self._current_tick - self._last_trade_tick + else: + trade_duration = 0 if trade_duration <= max_trade_duration: factor *= 1.5 @@ -301,8 +330,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and \ - action == Actions.Neutral.value: + if (self._position in (Positions.Short, Positions.Long) and + action == Actions.Neutral.value): return -1 * trade_duration / max_trade_duration # close long @@ -320,7 +349,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return 0. -def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, +def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, config: Dict[str, Any] = {}) -> Callable: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2e5c9f97b..00afd61d4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -19,7 +19,15 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): - + """ + User customizable fit method + :params: + data_dictionary: dict = common data dictionary containing all train/test + features/labels/weights. + dk: FreqaiDatakitchen = data kitchen for current pair. + :returns: + model: Any = trained model to be used for inference in dry/live/backtesting + """ train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) @@ -59,7 +67,15 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ def calculate_reward(self, action): - + """ + An example reward function. This is the one function that users will likely + wish to inject their own creativity into. + :params: + action: int = The action made by the agent for the current candle. 
+ :returns: + float = the reward to give to the agent for current step (used for optimization + of weights in NN) + """ # first, penalize if the action is not valid if not self._is_valid(action): return -2 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index c14511921..5b2ea2ef5 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -6,7 +6,7 @@ from typing import Any, Dict # , Tuple import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv - +from pandas import DataFrame from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, make_env) @@ -55,11 +55,18 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): return model - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): + def set_train_and_eval_environments(self, data_dictionary: Dict[str, Any], + prices_train: DataFrame, prices_test: DataFrame, + dk: FreqaiDataKitchen): """ - If user has particular environment configuration needs, they can do that by - overriding this function. In the present case, the user wants to setup training - environments for multiple workers. + User can override this if they are using a custom MyRLEnv + :params: + data_dictionary: dict = common data dictionary containing train and test + features/labels/weights. + prices_train/test: DataFrame = dataframe comprised of the prices to be used in + the environment during training + or testing + dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -79,4 +86,4 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), - best_model_save_path=dk.data_path) + best_model_save_path=str(dk.data_path)) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index f0af90f18..1bc30a670 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -244,7 +244,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] assert len(model_folders) == num_files - + Trade.use_db = True shutil.rmtree(Path(freqai.dk.full_path)) @@ -297,7 +297,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): assert len(model_folders) == 6 - # without deleting the exiting folder structure, re-run + # without deleting the existing folder structure, re-run freqai_conf.update({"timerange": "20180120-20180130"}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) From 647200e8a72ad20c4eb7d890486fb9c869ff0b3f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 19:30:56 +0200 Subject: [PATCH 087/421] isort --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- .../prediction_models/ReinforcementLearner_multiproc.py | 3 ++- tests/freqai/test_freqai_interface.py | 7 ++++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py 
b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index c82fd1ea9..70b3e58ef 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -14,6 +14,7 @@ from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed +from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -21,7 +22,6 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import Positions from freqtrade.persistence import Trade -from stable_baselines3.common.vec_env import SubprocVecEnv logger = logging.getLogger(__name__) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 5b2ea2ef5..0e6449dcd 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -4,9 +4,10 @@ from typing import Any, Dict # , Tuple # import numpy.typing as npt import torch as th +from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv -from pandas import DataFrame + from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, make_env) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 1bc30a670..3a200e0af 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -4,15 +4,16 @@ from pathlib import Path from unittest.mock import MagicMock import pytest -from freqtrade.enums import RunMode + from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider +from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange +from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import get_patched_exchange, log_has_re from tests.freqai.conftest import get_patched_freqai_strategy -from freqtrade.persistence import Trade -from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange def is_arm() -> bool: From caa47a2f47f6c6ce936a0762fee5bbaa39fc492d Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Wed, 28 Sep 2022 03:06:05 +0000 Subject: [PATCH 088/421] close subproc env on shutdown --- freqtrade/freqai/freqai_interface.py | 9 +++++++++ .../ReinforcementLearner_multiproc.py | 14 +++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 1a847a25e..f8ca34ddb 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -158,6 +158,13 @@ class IFreqaiModel(ABC): self.model = None self.dk = None + def _on_stop(self): + """ + Callback for Subclasses to override to include logic for shutting down resources + when SIGINT is sent. 
+ """ + return + def shutdown(self): """ Cleans up threads on Shutdown, set stop event. Join threads to wait @@ -166,6 +173,8 @@ class IFreqaiModel(ABC): logger.info("Stopping FreqAI") self._stop_event.set() + self._on_stop() + logger.info("Waiting on Training iteration") for _thread in self._threads: _thread.join() diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 0e6449dcd..efdd4883c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -73,7 +73,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"]["thread_count"]) + num_cpu = int(self.freqai_info["rl_config"].get("cpu_count", 2)) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i @@ -88,3 +88,15 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) + + + def _on_stop(self): + """ + Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. + """ + + if hasattr(self, "train_env") and self.train_env: + self.train_env.close() + + if hasattr(self, "eval_env") and self.eval_env: + self.eval_env.close() \ No newline at end of file From 9e36b0d2ea89a1bbbcf6aab411727a9ccedb4c32 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 27 Sep 2022 22:02:33 -0600 Subject: [PATCH 089/421] fix formatting --- .../freqai/prediction_models/ReinforcementLearner_multiproc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index efdd4883c..034c752e7 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -89,7 +89,6 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - def _on_stop(self): """ Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. @@ -99,4 +98,4 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.train_env.close() if hasattr(self, "eval_env") and self.eval_env: - self.eval_env.close() \ No newline at end of file + self.eval_env.close() From 099137adaca3d81f5e5cada2cb70ea159ee6ffa1 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 27 Sep 2022 22:35:15 -0600 Subject: [PATCH 090/421] remove hasattr calls --- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 034c752e7..d01c409c3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -94,8 +94,8 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): Hook called on bot shutdown. 
Close SubprocVecEnv subprocesses for clean shutdown. """ - if hasattr(self, "train_env") and self.train_env: + if self.train_env: self.train_env.close() - if hasattr(self, "eval_env") and self.eval_env: + if self.eval_env: self.eval_env.close() From 83343dc2f11988cc2ee384ebdcba2731d156e26d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 29 Sep 2022 00:10:18 +0200 Subject: [PATCH 091/421] control number of threads, update doc --- docs/freqai.md | 2 +- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 +++- freqtrade/freqai/data_kitchen.py | 6 +++++- freqtrade/freqai/freqai_interface.py | 2 ++ .../prediction_models/ReinforcementLearner_multiproc.py | 5 ++--- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index 938fb70f4..20562aadc 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -131,7 +131,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | | *Reinforcement Learning Parameters** | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
| `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points.
**Datatype:** Integer. -| `thread_count` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int. +| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int.
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 70b3e58ef..8785192f4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -39,7 +39,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) + self.max_threads = max(self.freqai_info['rl_config'].get( + 'cpu_count', 0), int(self.max_system_threads / 2)) + th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None self.eval_env: Union[SubprocVecEnv, gym.Env] = None diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 005005368..9f84e63b7 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,6 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd +import psutil from pandas import DataFrame from scipy import stats from sklearn import linear_model @@ -95,7 +96,10 @@ class FreqaiDataKitchen: ) self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {}) - self.thread_count = self.freqai_config.get("data_kitchen_thread_count", -1) + if not self.freqai_config.get("data_kitchen_thread_count", 0): + self.thread_count = int(psutil.cpu_count() * 2 - 2) + else: + self.thread_count = self.freqai_config["data_kitchen_thread_count"] self.train_dates: DataFrame = pd.DataFrame() self.unique_classes: Dict[str, list] = {} self.unique_class_list: list = [] diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index f8ca34ddb..5fe3c318c 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -11,6 +11,7 @@ from typing import Any, Dict, List, Optional, Tuple import numpy as np import pandas as pd +import psutil from numpy.typing import NDArray from pandas import DataFrame @@ -96,6 +97,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() self.strategy: Optional[IStrategy] = None + self.max_system_threads = int(psutil.cpu_count() * 2 - 2) def __getstate__(self): """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index d01c409c3..a644c0c04 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -73,18 +73,17 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"].get("cpu_count", 2)) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i - in range(num_cpu)]) + in range(self.max_threads)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i - in range(num_cpu)]) + in range(self.max_threads)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, 
render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) From dcf6ebe273729bf9634c44804f016941805d68d9 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 00:37:03 +0200 Subject: [PATCH 092/421] Update BaseReinforcementLearningModel.py --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 8785192f4..33568fa0b 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -39,7 +39,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - self.max_threads = max(self.freqai_info['rl_config'].get( + self.max_threads = min(self.freqai_info['rl_config'].get( 'cpu_count', 0), int(self.max_system_threads / 2)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] From 555cc4263003fc57599896f912beba66a46376b1 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:00:09 +0200 Subject: [PATCH 093/421] Ensure 1 thread is available (for testing purposes) --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 33568fa0b..705c35297 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -40,7 +40,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) self.max_threads = min(self.freqai_info['rl_config'].get( - 'cpu_count', 0), int(self.max_system_threads / 2)) + 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None From 7ef56e30296ad3a32fb88a01feb58b5b9b236944 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:01:22 +0200 Subject: [PATCH 094/421] Ensure at least 1 thread is available --- freqtrade/freqai/freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 5fe3c318c..44535f191 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -97,7 +97,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() self.strategy: Optional[IStrategy] = None - self.max_system_threads = int(psutil.cpu_count() * 2 - 2) + self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) def __getstate__(self): """ From 6e74d46660ac47aa44fc26a5c1c439d88d96576e Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:02:00 +0200 Subject: [PATCH 095/421] Ensure 1 thread available --- freqtrade/freqai/data_kitchen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 9f84e63b7..73717abce 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -97,7 +97,7 @@ class FreqaiDataKitchen: self.data['extra_returns_per_train'] = 
self.freqai_config.get('extra_returns_per_train', {}) if not self.freqai_config.get("data_kitchen_thread_count", 0): - self.thread_count = int(psutil.cpu_count() * 2 - 2) + self.thread_count = max(int(psutil.cpu_count() * 2 - 2), 1) else: self.thread_count = self.freqai_config["data_kitchen_thread_count"] self.train_dates: DataFrame = pd.DataFrame() From ab9d781b06c44ff331e7d094193963c0960d7dfb Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 1 Oct 2022 17:50:05 +0200 Subject: [PATCH 096/421] add reinforcement learning page to docs --- docs/freqai-parameter-table.md | 10 ++ docs/freqai-reinforcement-learning.md | 200 ++++++++++++++++++++++++++ mkdocs.yml | 1 + 3 files changed, 211 insertions(+) create mode 100644 docs/freqai-reinforcement-learning.md diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 8e19226ba..e80ab6fb6 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -46,6 +46,16 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `n_estimators` | The number of boosted trees to fit in regression.
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float.
+| | **Reinforcement Learning Parameters**
+| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
+| `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points.
**Datatype:** Integer. +| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. +| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int.
+| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. +| `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. +| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool.
+| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on whether `ReinforcementLearner_multiproc` is selected).
**Datatype:** int. +| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | | **Extraneous parameters** | `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. | `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: 2. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md new file mode 100644 index 000000000..0aef015ed --- /dev/null +++ b/docs/freqai-reinforcement-learning.md @@ -0,0 +1,200 @@ +# Reinforcement Learning + +Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: + +```bash +freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json +``` + +where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner`. The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor: + +```python + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + + # For RL, there are no direct targets to set. This is filler (neutral) + # until the agent sends an action. + df["&-action"] = 0 + + return df +``` + +Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environent: + +```python + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] +``` + +Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. 
In the present example, the user set the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. + +After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: + +```python + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df +``` + +It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. + +## Configuring the Reinforcement Learner + +In order to configure the `Reinforcement Learner` the following dictionary to their `freqai` config: + +```json + "rl_config": { + "train_cycles": 25, + "max_trade_duration_candles": 300, + "max_training_drawdown_pct": 0.02, + "cpu_count": 8, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "continual_learning": false, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.025 + } + } +``` + +Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environemtn to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). + +## Creating the reward + +As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. 
Below we show examples of how these rewards are all calculated: + +```python + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. + """ + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + return 0. +``` + +### Creating a custom agent + +Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py` + +### Using Tensorboard + +Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: + +```bash +pip3 install tensorboard +``` + +Next, the user can activate Tensorboard with the following command: + +```bash +cd freqtrade +tensorboard --logdir user_data/models/unique-id +``` + +where `unique-id` is the `identifier` set in the `freqai` configuration file. 
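
For reference, the `--logdir` name comes from the `identifier` field in the `freqai` section of the configuration - a minimal sketch, where `unique-id` is only a placeholder:

```json
    "freqai": {
        "identifier": "unique-id"
    }
```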
+ +![tensorboard](assets/tensorboard.png) \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 6477c1feb..81f2b7b0b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -29,6 +29,7 @@ nav: - Parameter table: freqai-parameter-table.md - Feature engineering: freqai-feature-engineering.md - Running FreqAI: freqai-running.md + - Reinforcement Learning: freqai-reinforcement-learning.md - Developer guide: freqai-developers.md - Short / Leverage: leverage.md - Utility Sub-commands: utils.md From cf882fa84eccd3ce3418451224c1621bf79ee689 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 1 Oct 2022 20:26:41 +0200 Subject: [PATCH 097/421] fix tests --- docs/freqai-reinforcement-learning.md | 2 +- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- tests/freqai/test_freqai_datakitchen.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 0aef015ed..742b2fb97 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -197,4 +197,4 @@ tensorboard --logdir user_data/models/unique-id where `unique-id` is the `identifier` set in the `freqai` configuration file. -![tensorboard](assets/tensorboard.png) \ No newline at end of file +![tensorboard](assets/tensorboard.png) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 705c35297..115ee59ce 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -213,7 +213,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk.data_dictionary["prediction_features"] = filtered_dataframe # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) + self.data_cleaning_predict(dk) pred_df = self.rl_model_predict( dk.data_dictionary["prediction_features"], dk, self.model) diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 4a0eadeb5..023193818 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -71,7 +71,7 @@ def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog): freqai = make_data_dictionary(mocker, freqai_conf) # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1}) freqai.dk.use_DBSCAN_to_remove_outliers(predict=False) - assert log_has_re(r"DBSCAN found eps of 2\.3\d\.", caplog) + assert log_has_re(r"DBSCAN found eps of 1.75", caplog) def test_compute_distances(mocker, freqai_conf): @@ -86,7 +86,7 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1}) freqai.dk.use_SVM_to_remove_outliers(predict=False) assert log_has_re( - "SVM detected 8.66%", + "SVM detected 7.36%", caplog, ) From 292d72d59325b35c047569f88892364bc2e9c027 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 3 Oct 2022 18:42:20 +0200 Subject: [PATCH 098/421] automatically handle model_save_type for user --- docs/freqai-parameter-table.md | 2 +- docs/freqai-reinforcement-learning.md | 1 - freqtrade/freqai/data_drawer.py | 20 ++++++++++++-------- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index e80ab6fb6..2fa54b590 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -53,7 +53,7 @@ 
Mandatory parameters are marked as **Required** and have to be set in one of the | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int.
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. -| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool. +| `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training.
**Datatype:** float.
Default: 0.8
| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on whether `ReinforcementLearner_multiproc` is selected).
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | | **Extraneous parameters** diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 742b2fb97..87a4a7646 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -118,7 +118,6 @@ In order to configure the `Reinforcement Learner` the following dictionary to th "cpu_count": 8, "model_type": "PPO", "policy_type": "MlpPolicy", - "continual_learning": false, "model_reward_parameters": { "rr": 1, "profit_aim": 0.025 diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 9bbcdad8b..143b4c172 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -92,6 +92,12 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) + if 'rl_config' in self.freqai_info: + self.model_type = 'stable_baselines' + logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' + ' to save models.') + else: + self.model_type = self.freqai_info.get('model_save_type', 'joblib') def load_drawer_from_disk(self): """ @@ -414,12 +420,11 @@ class FreqaiDataDrawer: save_path = Path(dk.data_path) # Save the trained model - model_type = self.freqai_info.get('model_save_type', 'joblib') - if model_type == 'joblib': + if self.model_type == 'joblib': dump(model, save_path / f"{dk.model_filename}_model.joblib") - elif model_type == 'keras': + elif self.model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif 'stable_baselines' in model_type: + elif 'stable_baselines' in self.model_type: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -496,16 +501,15 @@ class FreqaiDataDrawer: dk.data_path / f"{dk.model_filename}_trained_df.pkl" ) - model_type = self.freqai_info.get('model_save_type', 'joblib') # try to access model in memory instead of loading object from disk to save time if dk.live and coin in self.model_dictionary and not self.limit_ram_use: model = self.model_dictionary[coin] - elif model_type == 'joblib': + elif self.model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") - elif model_type == 'keras': + elif self.model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines': + elif self.model_type == 'stable_baselines': mod = __import__('stable_baselines3', fromlist=[ self.freqai_info['rl_config']['model_type']]) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) From 8c7f478724fcc1f897c20b727fa64bf304bc350e Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Wed, 5 Oct 2022 10:59:33 +0200 Subject: [PATCH 099/421] Update requirements-freqai.txt --- requirements-freqai.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index dae13ced0..0affbc238 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -11,5 +11,4 @@ torch==1.12.1 stable-baselines3==1.6.0 gym==0.21.0 tensorboard==2.9.1 -optuna==2.10.1 -sb3-contrib==1.6.0 \ No newline at end of file +sb3-contrib==1.6.0 From 936ca244821960ed0b7fc8ae92588f9819aaffd0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 15:58:54 +0200 Subject: [PATCH 100/421] separate RL install from general FAI install, update docs --- docs/freqai-reinforcement-learning.md | 11 +++++++---- 
.../freqai/prediction_models/ReinforcementLearner.py | 6 ++---- requirements-freqai-rl.txt | 8 ++++++++ requirements-freqai.txt | 10 ++++------ setup.sh | 9 ++++++++- tests/freqai/test_freqai_interface.py | 1 - 6 files changed, 29 insertions(+), 16 deletions(-) create mode 100644 requirements-freqai-rl.txt diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 87a4a7646..8a390ac34 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,5 +1,8 @@ # Reinforcement Learning +!!! Note + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. + Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: ```bash @@ -143,7 +146,7 @@ As users begin to modify the strategy and the prediction model, they will quickl if not self._is_valid(action): return -2 pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) + factor = 100 # reward agent for entering trades if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ @@ -166,12 +169,12 @@ As users begin to modify the strategy and the prediction model, they will quickl if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) return 0. ``` @@ -194,6 +197,6 @@ cd freqtrade tensorboard --logdir user_data/models/unique-id ``` -where `unique-id` is the `identifier` set in the `freqai` configuration file. +where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). 
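
If that port is already in use, Tensorboard can be pointed at another one - for example (the port number below is arbitrary):

```bash
tensorboard --logdir user_data/models/unique-id --port 6007
```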
![tensorboard](assets/tensorboard.png) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 00afd61d4..48519c34c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -2,7 +2,6 @@ import logging from pathlib import Path from typing import Any, Dict -import numpy as np import torch as th from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -81,7 +80,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return -2 pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) factor = 100 # reward agent for entering trades @@ -109,12 +107,12 @@ class ReinforcementLearner(BaseReinforcementLearningModel): if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) return 0. diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt new file mode 100644 index 000000000..e29df34ac --- /dev/null +++ b/requirements-freqai-rl.txt @@ -0,0 +1,8 @@ +# Include all requirements to run the bot. +-r requirements-freqai.txt + +# Required for freqai-rl +torch==1.12.1 +stable-baselines3==1.6.1 +gym==0.26.2 +sb3-contrib==1.6.1 diff --git a/requirements-freqai.txt b/requirements-freqai.txt index dae13ced0..d4a741c29 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -1,5 +1,5 @@ # Include all requirements to run the bot. --r requirements-hyperopt.txt +-r requirements.txt # Required for freqai scikit-learn==1.1.2 @@ -8,8 +8,6 @@ catboost==1.1; platform_machine != 'aarch64' lightgbm==3.3.2 xgboost==1.6.2 torch==1.12.1 -stable-baselines3==1.6.0 -gym==0.21.0 -tensorboard==2.9.1 -optuna==2.10.1 -sb3-contrib==1.6.0 \ No newline at end of file +stable-baselines3==1.6.1 +gym==0.26.2 +sb3-contrib==1.6.1 diff --git a/setup.sh b/setup.sh index 1a4a285a3..f57e820af 100755 --- a/setup.sh +++ b/setup.sh @@ -78,14 +78,21 @@ function updateenv() { fi REQUIREMENTS_FREQAI="" + REQUIREMENTS_FREQAI_RL="" read -p "Do you want to install dependencies for freqai [y/N]? " dev=$REPLY if [[ $REPLY =~ ^[Yy]$ ]] then REQUIREMENTS_FREQAI="-r requirements-freqai.txt" + read -p "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]? " + dev=$REPLY + if [[ $REPLY =~ ^[Yy]$ ]] + then + REQUIREMENTS_FREQAI="-r requirements-freqai-rl.txt" + fi fi - ${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI} + ${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI} ${REQUIREMENTS_FREQAI_RL} if [ $? 
-ne 0 ]; then echo "Failed installing dependencies" exit 1 diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 1f05f881e..b3e61b590 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -8,7 +8,6 @@ import pytest from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RunMode -from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange from freqtrade.optimize.backtesting import Backtesting From b5dd92f85ae7f9a0a3eac96541bdf4707136ec4f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 16:25:24 +0200 Subject: [PATCH 101/421] remove RL reqs from general FAI reqs --- requirements-freqai.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index d4a741c29..cf0d2eb07 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -7,7 +7,3 @@ joblib==1.2.0 catboost==1.1; platform_machine != 'aarch64' lightgbm==3.3.2 xgboost==1.6.2 -torch==1.12.1 -stable-baselines3==1.6.1 -gym==0.26.2 -sb3-contrib==1.6.1 From ab4705efd23f89326f59ede44bcb4192850b9d67 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 16:39:38 +0200 Subject: [PATCH 102/421] provide background and goals for RL in doc --- docs/freqai-reinforcement-learning.md | 28 ++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 8a390ac34..a59c5b9d3 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -3,6 +3,28 @@ !!! Note Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. + +## Background and terminology + +### What is RL and why does FreqAI need it? + +Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. + +A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live. *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* + +Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. 
However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors. + +### The RL interface + +With the current framework, we aim to expose the training environment to the user via the common "prediction model" file (i.e. CatboostClassifier, LightGBMRegressor, etc.). Users inherit our base environment in this file, which allows them to override as much or as little of the environment as they wish. + +We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. + +The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework (albeit with some basic assistance from core-dev). For the environment, it is inheriting from `gym.env` which means that a user would need to write an entirely new environment if they wish to switch to a different library. + + +## Running Reinforcement Learning + Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: ```bash @@ -178,10 +200,6 @@ As users begin to modify the strategy and the prediction model, they will quickl return 0. ``` -### Creating a custom agent - -Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py` - ### Using Tensorboard Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: @@ -199,4 +217,4 @@ tensorboard --logdir user_data/models/unique-id where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). 
-![tensorboard](assets/tensorboard.png) +![tensorboard](assets/tensorboard.jpg) From 17fb7f7a3b35319e7717e9ebdcfa79bde0a64fe9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 16:46:02 +0200 Subject: [PATCH 103/421] gym needs 0.21 to match stable_baselines3 --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index e29df34ac..b6bd7ef15 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -4,5 +4,5 @@ # Required for freqai-rl torch==1.12.1 stable-baselines3==1.6.1 -gym==0.26.2 +gym==0.21 sb3-contrib==1.6.1 From cf10a76a2a6796dde396afd53c6ab984a24e58e2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 17:06:18 +0200 Subject: [PATCH 104/421] bring back Trades.use_db = True --- tests/freqai/test_freqai_interface.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index b3e61b590..65a79a580 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -246,6 +246,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] assert len(model_folders) == num_files + Trade.use_db = True Backtesting.cleanup() shutil.rmtree(Path(freqai.dk.full_path)) From 017e476f49b48938c5a9b5415417d0e0f6122c4e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 17:20:40 +0200 Subject: [PATCH 105/421] add extras to setup.py for RL --- setup.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b3693c9f9..304567bcc 100644 --- a/setup.py +++ b/setup.py @@ -15,6 +15,14 @@ freqai = [ 'scikit-learn', 'catboost; platform_machine != "aarch64"', 'lightgbm', + 'xgboost' +] + +freqai_rl = [ + 'torch', + 'stable-baselines3', + 'gym==0.21', + 'sb3-contrib' ] develop = [ @@ -36,7 +44,7 @@ jupyter = [ 'nbconvert', ] -all_extra = plot + develop + jupyter + hyperopt + freqai +all_extra = plot + develop + jupyter + hyperopt + freqai + freqai_rl setup( tests_require=[ @@ -90,6 +98,7 @@ setup( 'jupyter': jupyter, 'hyperopt': hyperopt, 'freqai': freqai, + 'freqai_rl': freqai_rl, 'all': all_extra, }, ) From 488739424d07b5902569ef53e9e8a09a9a301718 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 20:55:50 +0200 Subject: [PATCH 106/421] fix reward inconsistency in template --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 115ee59ce..e89320668 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -309,7 +309,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -2 pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) factor = 100. 
# reward agent for entering trades @@ -340,13 +339,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) return 0. From e5204101d9a1bc938d4b9312cdc0ddd9fd35d803 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 21:34:10 +0200 Subject: [PATCH 107/421] add tensorboard back to reqs to keep default integration working (and for docker) --- docs/freqai-reinforcement-learning.md | 8 +------- requirements-freqai-rl.txt | 1 + 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index a59c5b9d3..8b775e046 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -202,13 +202,7 @@ As users begin to modify the strategy and the prediction model, they will quickl ### Using Tensorboard -Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: - -```bash -pip3 install tensorboard -``` - -Next, the user can activate Tensorboard with the following command: +Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. 
Tensorboard is activated via the following command: ```bash cd freqtrade diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index b6bd7ef15..22e077241 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -6,3 +6,4 @@ torch==1.12.1 stable-baselines3==1.6.1 gym==0.21 sb3-contrib==1.6.1 +tensorboard==2.10.1 From b9f1872d518349c1686b2db2e1ebc9c5ccd7fcc7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 6 Oct 2022 08:28:15 +0200 Subject: [PATCH 108/421] Install RL dependencies as dev dependency --- .gitignore | 1 - requirements-dev.txt | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 2d2d526d9..e400c01f5 100644 --- a/.gitignore +++ b/.gitignore @@ -113,4 +113,3 @@ target/ !config_examples/config_full.example.json !config_examples/config_kraken.example.json !config_examples/config_freqai.example.json -!config_examples/config_freqai-rl.example.json diff --git a/requirements-dev.txt b/requirements-dev.txt index ebe278e10..dccd5baba 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,6 +3,7 @@ -r requirements-plot.txt -r requirements-hyperopt.txt -r requirements-freqai.txt +-r requirements-freqai-rl.txt -r docs/requirements-docs.txt coveralls==3.3.1 From 3e258e000ebed48a10c90c40b060af69e5cb3470 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 7 Oct 2022 07:05:56 +0200 Subject: [PATCH 109/421] Don't set use_db without resetting it --- tests/freqai/test_freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 65a79a580..bd7c62c5f 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -192,12 +192,12 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) freqai_conf['runmode'] = RunMode.BACKTEST - Trade.use_db = False if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") if is_mac(): pytest.skip("Reinforcement learning module not available on intel based Mac OS") + Trade.use_db = False freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180120-20180130"}) From 8d7adfabe97e7e7db23df2108e181452fd9f14ac Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 8 Oct 2022 12:10:38 +0200 Subject: [PATCH 110/421] clean RL tests to avoid dir pollution and increase speed --- .../RL/BaseReinforcementLearningModel.py | 12 ++++++ .../prediction_models/ReinforcementLearner.py | 2 +- .../ReinforcementLearner_multiproc.py | 13 +----- tests/freqai/conftest.py | 24 +++++++++++ tests/freqai/test_freqai_interface.py | 43 ++----------------- .../ReinforcementLearner_test_4ac.py | 2 +- 6 files changed, 43 insertions(+), 53 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index e89320668..64af31c45 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -63,6 +63,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() + self.net_arch = self.rl_config.get('net_arch', [128, 128]) def unset_outlier_removal(self): """ @@ -287,6 +288,17 @@ class 
BaseReinforcementLearningModel(IFreqaiModel): return model + def _on_stop(self): + """ + Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. + """ + + if self.train_env: + self.train_env.close() + + if self.eval_env: + self.eval_env.close() + # Nested class which can be overridden by user to customize further class MyRLEnv(Base5ActionRLEnv): """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 48519c34c..4bf990172 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -31,7 +31,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[128, 128]) + net_arch=self.net_arch) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index a644c0c04..41345b967 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -28,7 +28,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[128, 128]) + net_arch=self.net_arch) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, @@ -87,14 +87,3 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - - def _on_stop(self): - """ - Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. 
- """ - - if self.train_env: - self.train_env.close() - - if self.eval_env: - self.eval_env.close() diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 026b45afc..7f4897439 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -58,6 +58,30 @@ def freqai_conf(default_conf, tmpdir): return freqaiconf +def make_rl_config(conf): + conf.update({"strategy": "freqai_rl_test_strat"}) + conf["freqai"].update({"model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "verbose": 1 + }}) + conf["freqai"]["rl_config"] = { + "train_cycles": 1, + "thread_count": 2, + "max_trade_duration_candles": 300, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "max_training_drawdown_pct": 0.5, + "net_arch": [32, 32], + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02, + "win_reward_factor": 2 + }} + + return conf + + def get_patched_data_kitchen(mocker, freqaiconf): dk = FreqaiDataKitchen(freqaiconf) return dk diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index bd7c62c5f..40a573547 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -14,7 +14,7 @@ from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import get_patched_exchange, log_has_re -from tests.freqai.conftest import get_patched_freqai_strategy +from tests.freqai.conftest import get_patched_freqai_strategy, make_rl_config def is_arm() -> bool: @@ -49,25 +49,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if 'ReinforcementLearner' in model: model_save_ext = 'zip' - freqai_conf.update({"strategy": "freqai_rl_test_strat"}) - freqai_conf["freqai"].update({"model_training_parameters": { - "learning_rate": 0.00025, - "gamma": 0.9, - "verbose": 1 - }}) - freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) - freqai_conf["freqai"]["rl_config"] = { - "train_cycles": 1, - "thread_count": 2, - "max_trade_duration_candles": 300, - "model_type": "PPO", - "policy_type": "MlpPolicy", - "max_training_drawdown_pct": 0.5, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.02, - "win_reward_factor": 2 - }} + freqai_conf = make_rl_config(freqai_conf) if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") @@ -79,6 +61,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): freqai = strategy.freqai freqai.live = True freqai.dk = FreqaiDataKitchen(freqai_conf) + freqai.dk.set_paths('ADA/BTC', 10000) timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) @@ -204,25 +187,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): freqai_conf.update({"strategy": strat}) if 'ReinforcementLearner' in model: - - freqai_conf["freqai"].update({"model_training_parameters": { - "learning_rate": 0.00025, - "gamma": 0.9, - "verbose": 1 - }}) - freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) - freqai_conf["freqai"]["rl_config"] = { - "train_cycles": 1, - "thread_count": 2, - "max_trade_duration_candles": 300, - "model_type": "PPO", - "policy_type": "MlpPolicy", - "max_training_drawdown_pct": 0.5, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.02, - "win_reward_factor": 2 - }} + freqai_conf = make_rl_config(freqai_conf) if 'test_4ac' in model: 
freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 9a8f800bd..13e5af02f 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -24,7 +24,7 @@ class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel): total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[128, 128]) + net_arch=[64, 64]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, From d4272269007b3f1d7027ca7b4f5e1c99980666b1 Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:15:20 +0200 Subject: [PATCH 111/421] Update docker_quickstart.md --- docs/docker_quickstart.md | 62 +++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/docs/docker_quickstart.md b/docs/docker_quickstart.md index 84c1d596a..6b48a7877 100644 --- a/docs/docker_quickstart.md +++ b/docs/docker_quickstart.md @@ -10,14 +10,14 @@ Start by downloading and installing Docker CE for your platform: * [Windows](https://docs.docker.com/docker-for-windows/install/) * [Linux](https://docs.docker.com/install/) -To simplify running freqtrade, [`docker-compose`](https://docs.docker.com/compose/install/) should be installed and available to follow the below [docker quick start guide](#docker-quick-start). +To simplify running freqtrade, [`docker compose`](https://docs.docker.com/compose/install/) should be installed and available to follow the below [docker quick start guide](#docker-quick-start). -## Freqtrade with docker-compose +## Freqtrade with docker -Freqtrade provides an official Docker image on [Dockerhub](https://hub.docker.com/r/freqtradeorg/freqtrade/), as well as a [docker-compose file](https://github.com/freqtrade/freqtrade/blob/stable/docker-compose.yml) ready for usage. +Freqtrade provides an official Docker image on [Dockerhub](https://hub.docker.com/r/freqtradeorg/freqtrade/), as well as a [docker compose file](https://github.com/freqtrade/freqtrade/blob/stable/docker-compose.yml) ready for usage. !!! Note - - The following section assumes that `docker` and `docker-compose` are installed and available to the logged in user. + - The following section assumes that `docker` is installed and available to the logged in user. - All below commands use relative directories and will have to be executed from the directory containing the `docker-compose.yml` file. 
### Docker quick start @@ -31,13 +31,13 @@ cd ft_userdata/ curl https://raw.githubusercontent.com/freqtrade/freqtrade/stable/docker-compose.yml -o docker-compose.yml # Pull the freqtrade image -docker-compose pull +docker compose pull # Create user directory structure -docker-compose run --rm freqtrade create-userdir --userdir user_data +docker compose run --rm freqtrade create-userdir --userdir user_data # Create configuration - Requires answering interactive questions -docker-compose run --rm freqtrade new-config --config user_data/config.json +docker compose run --rm freqtrade new-config --config user_data/config.json ``` The above snippet creates a new directory called `ft_userdata`, downloads the latest compose file and pulls the freqtrade image. @@ -64,7 +64,7 @@ The `SampleStrategy` is run by default. Once this is done, you're ready to launch the bot in trading mode (Dry-run or Live-trading, depending on your answer to the corresponding question you made above). ``` bash -docker-compose up -d +docker compose up -d ``` !!! Warning "Default configuration" @@ -84,27 +84,27 @@ You can now access the UI by typing localhost:8080 in your browser. #### Monitoring the bot -You can check for running instances with `docker-compose ps`. +You can check for running instances with `docker compose ps`. This should list the service `freqtrade` as `running`. If that's not the case, best check the logs (see next point). -#### Docker-compose logs +#### Docker compose logs Logs will be written to: `user_data/logs/freqtrade.log`. -You can also check the latest log with the command `docker-compose logs -f`. +You can also check the latest log with the command `docker compose logs -f`. #### Database The database will be located at: `user_data/tradesv3.sqlite` -#### Updating freqtrade with docker-compose +#### Updating freqtrade with docker -Updating freqtrade when using `docker-compose` is as simple as running the following 2 commands: +Updating freqtrade when using `docker` is as simple as running the following 2 commands: ``` bash # Download the latest image -docker-compose pull +docker compose pull # Restart the image -docker-compose up -d +docker compose up -d ``` This will first pull the latest image, and will then restart the container with the just pulled version. @@ -116,43 +116,43 @@ This will first pull the latest image, and will then restart the container with Advanced users may edit the docker-compose file further to include all possible options or arguments. -All freqtrade arguments will be available by running `docker-compose run --rm freqtrade `. +All freqtrade arguments will be available by running `docker compose run --rm freqtrade `. -!!! Warning "`docker-compose` for trade commands" - Trade commands (`freqtrade trade <...>`) should not be ran via `docker-compose run` - but should use `docker-compose up -d` instead. +!!! Warning "`docker compose` for trade commands" + Trade commands (`freqtrade trade <...>`) should not be ran via `docker compose run` - but should use `docker compose up -d` instead. This makes sure that the container is properly started (including port forwardings) and will make sure that the container will restart after a system reboot. If you intend to use freqUI, please also ensure to adjust the [configuration accordingly](rest-api.md#configuration-with-docker), otherwise the UI will not be available. -!!! Note "`docker-compose run --rm`" +!!! 
Note "`docker compose run --rm`" Including `--rm` will remove the container after completion, and is highly recommended for all modes except trading mode (running with `freqtrade trade` command). -??? Note "Using docker without docker-compose" - "`docker-compose run --rm`" will require a compose file to be provided. +??? Note "Using docker without docker" + "`docker compose run --rm`" will require a compose file to be provided. Some freqtrade commands that don't require authentication such as `list-pairs` can be run with "`docker run --rm`" instead. For example `docker run --rm freqtradeorg/freqtrade:stable list-pairs --exchange binance --quote BTC --print-json`. This can be useful for fetching exchange information to add to your `config.json` without affecting your running containers. -#### Example: Download data with docker-compose +#### Example: Download data with docker Download backtesting data for 5 days for the pair ETH/BTC and 1h timeframe from Binance. The data will be stored in the directory `user_data/data/` on the host. ``` bash -docker-compose run --rm freqtrade download-data --pairs ETH/BTC --exchange binance --days 5 -t 1h +docker compose run --rm freqtrade download-data --pairs ETH/BTC --exchange binance --days 5 -t 1h ``` Head over to the [Data Downloading Documentation](data-download.md) for more details on downloading data. -#### Example: Backtest with docker-compose +#### Example: Backtest with docker Run backtesting in docker-containers for SampleStrategy and specified timerange of historical data, on 5m timeframe: ``` bash -docker-compose run --rm freqtrade backtesting --config user_data/config.json --strategy SampleStrategy --timerange 20190801-20191001 -i 5m +docker compose run --rm freqtrade backtesting --config user_data/config.json --strategy SampleStrategy --timerange 20190801-20191001 -i 5m ``` Head over to the [Backtesting Documentation](backtesting.md) to learn more. -### Additional dependencies with docker-compose +### Additional dependencies with docker If your strategy requires dependencies not included in the default image - it will be necessary to build the image on your host. For this, please create a Dockerfile containing installation steps for the additional dependencies (have a look at [docker/Dockerfile.custom](https://github.com/freqtrade/freqtrade/blob/develop/docker/Dockerfile.custom) for an example). @@ -166,15 +166,15 @@ You'll then also need to modify the `docker-compose.yml` file and uncomment the dockerfile: "./Dockerfile." ``` -You can then run `docker-compose build --pull` to build the docker image, and run it using the commands described above. +You can then run `docker compose build --pull` to build the docker image, and run it using the commands described above. -### Plotting with docker-compose +### Plotting with docker Commands `freqtrade plot-profit` and `freqtrade plot-dataframe` ([Documentation](plotting.md)) are available by changing the image to `*_plot` in your docker-compose.yml file. You can then use these commands as follows: ``` bash -docker-compose run --rm freqtrade plot-dataframe --strategy AwesomeStrategy -p BTC/ETH --timerange=20180801-20180805 +docker compose run --rm freqtrade plot-dataframe --strategy AwesomeStrategy -p BTC/ETH --timerange=20180801-20180805 ``` The output will be stored in the `user_data/plot` directory, and can be opened with any modern browser. @@ -185,7 +185,7 @@ Freqtrade provides a docker-compose file which starts up a jupyter lab server. 
You can run this server using the following command: ``` bash -docker-compose -f docker/docker-compose-jupyter.yml up +docker compose -f docker/docker-compose-jupyter.yml up ``` This will create a docker-container running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`. @@ -194,7 +194,7 @@ Please use the link that's printed in the console after startup for simplified l Since part of this image is built on your machine, it is recommended to rebuild the image from time to time to keep freqtrade (and dependencies) up-to-date. ``` bash -docker-compose -f docker/docker-compose-jupyter.yml build --no-cache +docker compose -f docker/docker-compose-jupyter.yml build --no-cache ``` ## Troubleshooting From abcbe7a42153740208f7ac4186fdecb0bc45f3f4 Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:15:59 +0200 Subject: [PATCH 112/421] Update updating.md --- docs/updating.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/updating.md b/docs/updating.md index 893bc846e..1e5dc8ffe 100644 --- a/docs/updating.md +++ b/docs/updating.md @@ -6,14 +6,14 @@ To update your freqtrade installation, please use one of the below methods, corr Breaking changes / changed behavior will be documented in the changelog that is posted alongside every release. For the develop branch, please follow PR's to avoid being surprised by changes. -## docker-compose +## docker !!! Note "Legacy installations using the `master` image" We're switching from master to stable for the release Images - please adjust your docker-file and replace `freqtradeorg/freqtrade:master` with `freqtradeorg/freqtrade:stable` ``` bash -docker-compose pull -docker-compose up -d +docker compose pull +docker compose up -d ``` ## Installation via setup script From 11d6d0be9e25b1fac1be5cde8addbedeaedf129f Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:22:07 +0200 Subject: [PATCH 113/421] Update sql_cheatsheet.md --- docs/sql_cheatsheet.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sql_cheatsheet.md b/docs/sql_cheatsheet.md index c42cb5575..67c081d4c 100644 --- a/docs/sql_cheatsheet.md +++ b/docs/sql_cheatsheet.md @@ -13,12 +13,12 @@ Feel free to use a visual Database editor like SqliteBrowser if you feel more co sudo apt-get install sqlite3 ``` -### Using sqlite3 via docker-compose +### Using sqlite3 via docker The freqtrade docker image does contain sqlite3, so you can edit the database without having to install anything on the host system. ``` bash -docker-compose exec freqtrade /bin/bash +docker compose exec freqtrade /bin/bash sqlite3 .sqlite ``` From fe3d99b5685ad681347a448681f045c05e7f541e Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:22:49 +0200 Subject: [PATCH 114/421] Update feature_request.md --- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index a18915462..db335bf09 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -18,7 +18,7 @@ Have you search for this feature before requesting it? 
It's highly likely that a * Operating system: ____ * Python Version: _____ (`python -V`) * CCXT version: _____ (`pip freeze | grep ccxt`) - * Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker) + * Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker) ## Describe the enhancement From 67850d92af1c81dfc139b7045ac33f4e2056e9fb Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:24:46 +0200 Subject: [PATCH 115/421] Update question.md --- .github/ISSUE_TEMPLATE/question.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 4b02e5f19..9283f0e4f 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -18,7 +18,7 @@ Please do not use the question template to report bugs or to request new feature * Operating system: ____ * Python Version: _____ (`python -V`) * CCXT version: _____ (`pip freeze | grep ccxt`) - * Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker) + * Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker) ## Your question From 35cc6aa966cc509cc9dc528c813928be4e6f3757 Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:25:37 +0200 Subject: [PATCH 116/421] Update data-analysis.md --- docs/data-analysis.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data-analysis.md b/docs/data-analysis.md index 926ed3eae..5f01ae38f 100644 --- a/docs/data-analysis.md +++ b/docs/data-analysis.md @@ -5,7 +5,7 @@ You can analyze the results of backtests and trading history easily using Jupyte ## Quick start with docker Freqtrade provides a docker-compose file which starts up a jupyter lab server. -You can run this server using the following command: `docker-compose -f docker/docker-compose-jupyter.yml up` +You can run this server using the following command: `docker compose -f docker/docker-compose-jupyter.yml up` This will create a dockercontainer running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`. Please use the link that's printed in the console after startup for simplified login. From 8c39b37223ec3ea449bf277957794f19a5bd9c58 Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:26:09 +0200 Subject: [PATCH 117/421] Update bug_report.md --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 54c9eab50..8637c0d68 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -20,7 +20,7 @@ Please do not use bug reports to request new features. 
* Operating system: ____ * Python Version: _____ (`python -V`) * CCXT version: _____ (`pip freeze | grep ccxt`) - * Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker) + * Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker) Note: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out. From 51b410ac1a333e5ae744e68be13b5dca8b3a1748 Mon Sep 17 00:00:00 2001 From: Matteo Manzi <33622899+matteoettam09@users.noreply.github.com> Date: Tue, 18 Oct 2022 19:28:29 +0200 Subject: [PATCH 118/421] Update utils.md --- docs/utils.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/utils.md b/docs/utils.md index ee8793159..2f4604323 100644 --- a/docs/utils.md +++ b/docs/utils.md @@ -654,7 +654,7 @@ Common arguments: You can also use webserver mode via docker. Starting a one-off container requires the configuration of the port explicitly, as ports are not exposed by default. -You can use `docker-compose run --rm -p 127.0.0.1:8080:8080 freqtrade webserver` to start a one-off container that'll be removed once you stop it. This assumes that port 8080 is still available and no other bot is running on that port. +You can use `docker compose run --rm -p 127.0.0.1:8080:8080 freqtrade webserver` to start a one-off container that'll be removed once you stop it. This assumes that port 8080 is still available and no other bot is running on that port. Alternatively, you can reconfigure the docker-compose file to have the command updated: @@ -664,7 +664,7 @@ Alternatively, you can reconfigure the docker-compose file to have the command u --config /freqtrade/user_data/config.json ``` -You can now use `docker-compose up` to start the webserver. +You can now use `docker compose up` to start the webserver. This assumes that the configuration has a webserver enabled and configured for docker (listening port = `0.0.0.0`). !!! 
Tip From a2843165e18e9a4ab46b686a825a1456ea45fbf8 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 30 Oct 2022 10:31:38 +0100 Subject: [PATCH 119/421] fix leftovers from merge --- freqtrade/freqai/data_drawer.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index a4d5b5d5c..5640dcb55 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -98,6 +98,13 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} self.metric_tracker: Dict[str, Dict[str, Dict[str, list]]] = {} + self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) + if 'rl_config' in self.freqai_info: + self.model_type = 'stable_baselines' + logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' + ' to save models.') + else: + self.model_type = self.freqai_info.get('model_save_type', 'joblib') def update_metric_tracker(self, metric: str, value: float, pair: str) -> None: """ @@ -124,13 +131,6 @@ class FreqaiDataDrawer: self.update_metric_tracker('cpu_load1min', load1 / cpus, pair) self.update_metric_tracker('cpu_load5min', load5 / cpus, pair) self.update_metric_tracker('cpu_load15min', load15 / cpus, pair) - self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) - if 'rl_config' in self.freqai_info: - self.model_type = 'stable_baselines' - logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' - ' to save models.') - else: - self.model_type = self.freqai_info.get('model_save_type', 'joblib') def load_drawer_from_disk(self): """ From a11d579bc2338fa3087fc5f7d079fa43314cecd5 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 22 Oct 2022 16:22:55 +0200 Subject: [PATCH 120/421] Verify order fills on "detail" timeframe --- freqtrade/optimize/backtesting.py | 69 ++++++++++++++++--------------- 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 4d98f1f5a..b3395a2c3 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -688,10 +688,11 @@ class Backtesting: trade.orders.append(order) return trade - def _get_exit_trade_entry(self, trade: LocalTrade, row: Tuple) -> Optional[LocalTrade]: + def _get_exit_trade_entry( + self, trade: LocalTrade, row: Tuple, is_first: bool) -> Optional[LocalTrade]: exit_candle_time: datetime = row[DATE_IDX].to_pydatetime() - if self.trading_mode == TradingMode.FUTURES: + if is_first and self.trading_mode == TradingMode.FUTURES: trade.funding_fees = self.exchange.calculate_funding_fees( self.futures_data[trade.pair], amount=trade.amount, @@ -700,32 +701,7 @@ class Backtesting: close_date=exit_candle_time, ) - if self.timeframe_detail and trade.pair in self.detail_data: - exit_candle_end = exit_candle_time + timedelta(minutes=self.timeframe_min) - - detail_data = self.detail_data[trade.pair] - detail_data = detail_data.loc[ - (detail_data['date'] >= exit_candle_time) & - (detail_data['date'] < exit_candle_end) - ].copy() - if len(detail_data) == 0: - # Fall back to "regular" data if no detail data was found for this candle - return self._get_exit_trade_entry_for_candle(trade, row) - detail_data.loc[:, 'enter_long'] = row[LONG_IDX] - detail_data.loc[:, 'exit_long'] = row[ELONG_IDX] - detail_data.loc[:, 'enter_short'] = row[SHORT_IDX] - detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX] - detail_data.loc[:, 'enter_tag'] = 
row[ENTER_TAG_IDX] - detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX] - for det_row in detail_data[HEADERS].values.tolist(): - res = self._get_exit_trade_entry_for_candle(trade, det_row) - if res: - return res - - return None - - else: - return self._get_exit_trade_entry_for_candle(trade, row) + return self._get_exit_trade_entry_for_candle(trade, row) def get_valid_price_and_stake( self, pair: str, row: Tuple, propose_rate: float, stake_amount: float, @@ -1070,7 +1046,7 @@ class Backtesting: def backtest_loop( self, row: Tuple, pair: str, current_time: datetime, end_date: datetime, - max_open_trades: int, open_trade_count_start: int) -> int: + max_open_trades: int, open_trade_count_start: int, is_first: bool = True) -> int: """ NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized. @@ -1088,9 +1064,11 @@ class Backtesting: # without positionstacking, we can only have one open trade per pair. # max_open_trades must be respected # don't open on the last row + # We only open trades on the initial candle. trade_dir = self.check_for_trade_entry(row) if ( (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0) + and is_first and self.trade_slot_available(max_open_trades, open_trade_count_start) and current_time != end_date and trade_dir is not None @@ -1116,7 +1094,7 @@ class Backtesting: # 4. Create exit orders (if any) if not trade.open_order_id: - self._get_exit_trade_entry(trade, row) # Place exit order if necessary + self._get_exit_trade_entry(trade, row, is_first) # Place exit order if necessary # 5. Process exit orders. order = trade.select_order(trade.exit_side, is_open=True) @@ -1167,7 +1145,6 @@ class Backtesting: self.progress.init_step(BacktestState.BACKTEST, int( (end_date - start_date) / timedelta(minutes=self.timeframe_min))) - # Loop timerange and get candle for each pair at that point in time while current_time <= end_date: open_trade_count_start = LocalTrade.bt_open_open_trade_count @@ -1181,9 +1158,35 @@ class Backtesting: row_index += 1 indexes[pair] = row_index self.dataprovider._set_dataframe_max_index(row_index) + current_detail_time: datetime = row[DATE_IDX].to_pydatetime() + if self.timeframe_detail and pair in self.detail_data: + exit_candle_end = current_detail_time + timedelta(minutes=self.timeframe_min) - open_trade_count_start = self.backtest_loop( - row, pair, current_time, end_date, max_open_trades, open_trade_count_start) + detail_data = self.detail_data[pair] + detail_data = detail_data.loc[ + (detail_data['date'] >= current_detail_time) & + (detail_data['date'] < exit_candle_end) + ].copy() + if len(detail_data) == 0: + # Fall back to "regular" data if no detail data was found for this candle + open_trade_count_start = self.backtest_loop( + row, pair, current_time, end_date, max_open_trades, + open_trade_count_start) + detail_data.loc[:, 'enter_long'] = row[LONG_IDX] + detail_data.loc[:, 'exit_long'] = row[ELONG_IDX] + detail_data.loc[:, 'enter_short'] = row[SHORT_IDX] + detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX] + detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX] + detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX] + is_first = True + for det_row in detail_data[HEADERS].values.tolist(): + open_trade_count_start = self.backtest_loop( + det_row, pair, current_time, end_date, max_open_trades, + open_trade_count_start, is_first) + is_first = False + else: + open_trade_count_start = self.backtest_loop( + row, pair, current_time, end_date, max_open_trades, open_trade_count_start) # Move time one configured 
time_interval ahead. self.progress.increment() From 29ba263c3c19a96abecf50015dcc9f6017fa6ee5 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 22 Oct 2022 16:23:03 +0200 Subject: [PATCH 121/421] Update some test parameters --- tests/optimize/test_backtesting.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index 140cc3394..21d9d25cc 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -686,7 +686,7 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None: ) # No data available. - res = backtesting._get_exit_trade_entry(trade, row_sell) + res = backtesting._get_exit_trade_entry(trade, row_sell, True) assert res is not None assert res.exit_reason == ExitType.ROI.value assert res.close_date_utc == datetime(2020, 1, 1, 5, 0, tzinfo=timezone.utc) @@ -699,13 +699,13 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None: [], columns=['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long', 'enter_short', 'exit_short', 'long_tag', 'short_tag', 'exit_tag']) - res = backtesting._get_exit_trade_entry(trade, row) + res = backtesting._get_exit_trade_entry(trade, row, True) assert res is None # Assign backtest-detail data backtesting.detail_data[pair] = row_detail - res = backtesting._get_exit_trade_entry(trade, row_sell) + res = backtesting._get_exit_trade_entry(trade, row_sell, True) assert res is not None assert res.exit_reason == ExitType.ROI.value # Sell at minute 3 (not available above!) From 0888b53b5a080c069006dc30d479126b17e56979 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 4 Nov 2022 07:07:56 +0100 Subject: [PATCH 122/421] Udpate current_time handling for detail loop --- freqtrade/optimize/backtesting.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index b3395a2c3..fa45e9dd4 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -1179,9 +1179,11 @@ class Backtesting: detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX] detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX] is_first = True + current_time_det = current_time for det_row in detail_data[HEADERS].values.tolist(): + current_time_det += timedelta(minutes=self.timeframe_detail_min) open_trade_count_start = self.backtest_loop( - det_row, pair, current_time, end_date, max_open_trades, + det_row, pair, current_time_det, end_date, max_open_trades, open_trade_count_start, is_first) is_first = False else: From 5bd3e54b17424daec79c3208589b807f92890b7a Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 5 Nov 2022 20:01:05 +0100 Subject: [PATCH 123/421] Add test for detail backtesting --- tests/optimize/test_backtesting.py | 87 ++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index 21d9d25cc..26c31efef 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -787,17 +787,98 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None: for _, t in results.iterrows(): assert len(t['orders']) == 2 ln = data_pair.loc[data_pair["date"] == t["open_date"]] - # Check open trade rate alignes to open rate + # Check open trade rate aligns to open rate assert not ln.empty assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6) - # check close trade rate alignes to close 
rate or is between high and low + # check close trade rate aligns to close rate or is between high and low ln1 = data_pair.loc[data_pair["date"] == t["close_date"]] - assert not ln1.empty assert (round(ln1.iloc[0]["open"], 6) == round(t["close_rate"], 6) or round(ln1.iloc[0]["low"], 6) < round( t["close_rate"], 6) < round(ln1.iloc[0]["high"], 6)) +@pytest.mark.parametrize('use_detail', [True, False]) +def test_backtest_one_detail(default_conf_usdt, fee, mocker, testdatadir, use_detail) -> None: + default_conf_usdt['use_exit_signal'] = False + mocker.patch('freqtrade.exchange.Exchange.get_fee', fee) + mocker.patch("freqtrade.exchange.Exchange.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch("freqtrade.exchange.Exchange.get_max_pair_stake_amount", return_value=float('inf')) + if use_detail: + default_conf_usdt['timeframe_detail'] = '1m' + patch_exchange(mocker) + + def advise_entry(df, *args, **kwargs): + # Mock function to force several entries + df.loc[(df['rsi'] < 40), 'enter_long'] = 1 + return df + + def custom_entry_price(proposed_rate, **kwargs): + return proposed_rate * 0.997 + + backtesting = Backtesting(default_conf_usdt) + backtesting._set_strategy(backtesting.strategylist[0]) + backtesting.strategy.populate_entry_trend = advise_entry + backtesting.strategy.custom_entry_price = custom_entry_price + pair = 'XRP/ETH' + # Pick a timerange adapted to the pair we use to test + timerange = TimeRange.parse_timerange('20191010-20191013') + data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['XRP/ETH'], + timerange=timerange) + if use_detail: + data_1m = history.load_data(datadir=testdatadir, timeframe='1m', pairs=['XRP/ETH'], + timerange=timerange) + backtesting.detail_data = data_1m + processed = backtesting.strategy.advise_all_indicators(data) + min_date, max_date = get_timerange(processed) + + result = backtesting.backtest( + processed=deepcopy(processed), + start_date=min_date, + end_date=max_date, + max_open_trades=10, + ) + results = result['results'] + assert not results.empty + # Timeout settings from default_conf = entry: 10, exit: 30 + assert len(results) == (2 if use_detail else 3) + + assert 'orders' in results.columns + data_pair = processed[pair] + + data_1m_pair = data_1m[pair] if use_detail else pd.DataFrame() + late_entry = 0 + for _, t in results.iterrows(): + assert len(t['orders']) == 2 + + entryo = t['orders'][0] + entry_ts = datetime.fromtimestamp(entryo['order_filled_timestamp'] // 1000, tz=timezone.utc) + if entry_ts > t['open_date']: + late_entry += 1 + + # Get "entry fill" candle + ln = (data_1m_pair.loc[data_1m_pair["date"] == entry_ts] + if use_detail else data_pair.loc[data_pair["date"] == entry_ts]) + # Check open trade rate aligns to open rate + assert not ln.empty + + # assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6) + assert round(ln.iloc[0]["low"], 6) <= round( + t["open_rate"], 6) <= round(ln.iloc[0]["high"], 6) + # check close trade rate aligns to close rate or is between high and low + ln1 = data_pair.loc[data_pair["date"] == t["close_date"]] + if use_detail: + ln1_1m = data_1m_pair.loc[data_1m_pair["date"] == t["close_date"]] + assert not ln1.empty or not ln1_1m.empty + else: + assert not ln1.empty + ln2 = ln1_1m if ln1.empty else ln1 + + assert (round(ln2.iloc[0]["low"], 6) <= round( + t["close_rate"], 6) <= round(ln2.iloc[0]["high"], 6)) + + assert late_entry > 0 + + def test_backtest_timedout_entry_orders(default_conf, fee, mocker, testdatadir) -> None: # This strategy intentionally places unfillable 
orders. default_conf['strategy'] = 'StrategyTestV3CustomEntryPrice' From d089fdae34820726c4902911bf27fa96ea44e27b Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 5 Nov 2022 20:02:36 +0100 Subject: [PATCH 124/421] Fix current-time_det calculation --- freqtrade/optimize/backtesting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index fa45e9dd4..8faeeb9fe 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -1181,10 +1181,10 @@ class Backtesting: is_first = True current_time_det = current_time for det_row in detail_data[HEADERS].values.tolist(): - current_time_det += timedelta(minutes=self.timeframe_detail_min) open_trade_count_start = self.backtest_loop( det_row, pair, current_time_det, end_date, max_open_trades, open_trade_count_start, is_first) + current_time_det += timedelta(minutes=self.timeframe_detail_min) is_first = False else: open_trade_count_start = self.backtest_loop( From ded57fb3019e1e564cc9a6842c2183ae18de8951 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 5 Nov 2022 20:03:20 +0100 Subject: [PATCH 125/421] Remove no longer valid test part --- freqtrade/optimize/backtesting.py | 2 +- tests/optimize/test_backtesting.py | 32 ------------------------------ 2 files changed, 1 insertion(+), 33 deletions(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 8faeeb9fe..54312177c 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -1064,7 +1064,7 @@ class Backtesting: # without positionstacking, we can only have one open trade per pair. # max_open_trades must be respected # don't open on the last row - # We only open trades on the initial candle. + # We only open trades on the main candle, not on detail candles trade_dir = self.check_for_trade_entry(row) if ( (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0) diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index 26c31efef..9a91b0c6f 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -663,27 +663,6 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None: '', # Exit Signal Name ] - row_detail = pd.DataFrame( - [ - [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=0, tzinfo=timezone.utc), - 200, 200.1, 197, 199, 1, 0, 0, 0, '', '', '', - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=1, tzinfo=timezone.utc), - 199, 199.7, 199, 199.5, 0, 0, 0, 0, '', '', '', - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=2, tzinfo=timezone.utc), - 199.5, 200.8, 199, 200.9, 0, 0, 0, 0, '', '', '', - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=3, tzinfo=timezone.utc), - 200.5, 210.5, 193, 210.5, 0, 0, 0, 0, '', '', '', # ROI sell (?) - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=4, tzinfo=timezone.utc), - 200, 200.1, 193, 199, 0, 0, 0, 0, '', '', '', - ], - ], columns=['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long', - 'enter_short', 'exit_short', 'long_tag', 'short_tag', 'exit_tag'] - ) # No data available. 
res = backtesting._get_exit_trade_entry(trade, row_sell, True) @@ -702,17 +681,6 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None: res = backtesting._get_exit_trade_entry(trade, row, True) assert res is None - # Assign backtest-detail data - backtesting.detail_data[pair] = row_detail - - res = backtesting._get_exit_trade_entry(trade, row_sell, True) - assert res is not None - assert res.exit_reason == ExitType.ROI.value - # Sell at minute 3 (not available above!) - assert res.close_date_utc == datetime(2020, 1, 1, 5, 3, tzinfo=timezone.utc) - sell_order = res.select_order('sell', True) - assert sell_order is not None - def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None: default_conf['use_exit_signal'] = False From 2c1330a4e29abfbea95df7e7cfa994faffe3dd81 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 6 Nov 2022 08:32:27 +0100 Subject: [PATCH 126/421] Update docs to new behavior --- docs/backtesting.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/backtesting.md b/docs/backtesting.md index e3cddb7a1..bfe0f4d07 100644 --- a/docs/backtesting.md +++ b/docs/backtesting.md @@ -583,7 +583,8 @@ To utilize this, you can append `--timeframe-detail 5m` to your regular backtest freqtrade backtesting --strategy AwesomeStrategy --timeframe 1h --timeframe-detail 5m ``` -This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe - and for every "open trade candle" (candles where a trade is open) the 5m data will be used to simulate intra-candle movements. +This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe, and Entry orders will only be placed at the main timeframe, however Order fills and exit signals will be evaluated at the 5m candle, simulating intra-candle movements. + All callback functions (`custom_exit()`, `custom_stoploss()`, ... ) will be running for each 5m candle once the trade is opened (so 12 times in the above example of 1h timeframe, and 5m detailed timeframe). `--timeframe-detail` must be smaller than the original timeframe, otherwise backtesting will fail to start. From 9c5ba0732a3440c95487cb13263bd394d0670647 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Tue, 8 Nov 2022 10:32:18 -0300 Subject: [PATCH 127/421] save predictions with date and merge by date --- freqtrade/freqai/data_kitchen.py | 23 +++++++++++++++-------- freqtrade/freqai/freqai_interface.py | 2 +- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 12a3cd519..8fd42ee85 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -979,7 +979,8 @@ class FreqaiDataKitchen: return weights def get_predictions_to_append(self, predictions: DataFrame, - do_predict: npt.ArrayLike) -> DataFrame: + do_predict: npt.ArrayLike, + dataframe_backtest: DataFrame) -> DataFrame: """ Get backtest prediction from current backtest period """ @@ -1001,7 +1002,9 @@ class FreqaiDataKitchen: if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0: append_df["DI_values"] = self.DI_values - return append_df + dataframe_backtest.reset_index(drop=True, inplace=True) + merged_df = pd.concat([dataframe_backtest["date"], append_df], axis=1) + return merged_df def append_predictions(self, append_df: DataFrame) -> None: """ @@ -1019,15 +1022,19 @@ class FreqaiDataKitchen: when it goes back to the strategy. 
These rows are not included in the backtest. """ - len_filler = len(dataframe) - len(self.full_df.index) # startup_candle_count - filler_df = pd.DataFrame( - np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns - ) + # len_filler = len(dataframe) - len(self.full_df.index) # startup_candle_count + # filler_df = pd.DataFrame( + # np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns + # ) - self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True) + # self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True) to_keep = [col for col in dataframe.columns if not col.startswith("&")] - self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1) + # self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1) + # self.full_df = DataFrame() + + self.return_dataframe = pd.merge(dataframe[to_keep], + self.full_df, how='left', on='date') self.full_df = DataFrame() return diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index ae123f852..59a82958b 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -301,7 +301,7 @@ class IFreqaiModel(ABC): self.model = self.dd.load_data(pair, dk) pred_df, do_preds = self.predict(dataframe_backtest, dk) - append_df = dk.get_predictions_to_append(pred_df, do_preds) + append_df = dk.get_predictions_to_append(pred_df, do_preds, dataframe_backtest) dk.append_predictions(append_df) dk.save_backtesting_prediction(append_df) From 8d9988a942aba46f4e7eb851d51f30497983e1b7 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Tue, 8 Nov 2022 11:06:23 -0300 Subject: [PATCH 128/421] enforce date column in backtesting freqai predictions files --- freqtrade/freqai/data_kitchen.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 8fd42ee85..b99447ac9 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1021,20 +1021,11 @@ class FreqaiDataKitchen: Back fill values to before the backtesting range so that the dataframe matches size when it goes back to the strategy. These rows are not included in the backtest. """ - - # len_filler = len(dataframe) - len(self.full_df.index) # startup_candle_count - # filler_df = pd.DataFrame( - # np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns - # ) - - # self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True) - to_keep = [col for col in dataframe.columns if not col.startswith("&")] - # self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1) - # self.full_df = DataFrame() - self.return_dataframe = pd.merge(dataframe[to_keep], self.full_df, how='left', on='date') + self.return_dataframe[self.full_df.columns] = ( + self.return_dataframe[self.full_df.columns].fillna(value=0)) self.full_df = DataFrame() return @@ -1368,12 +1359,13 @@ class FreqaiDataKitchen: if file_exists: append_df = self.get_backtesting_prediction() - if len(append_df) == len_backtest_df: + if len(append_df) == len_backtest_df and 'date' in append_df: logger.info(f"Found backtesting prediction file at {path_to_predictionfile}") return True else: logger.info("A new backtesting prediction file is required. 
" - "(Number of predictions is different from dataframe length).") + "(Number of predictions is different from dataframe length or " + "old prediction file version).") return False else: logger.info( From 3e57c18ac60e4d5310f3c4044b5d0ba59a0cb822 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Tue, 8 Nov 2022 18:20:39 -0300 Subject: [PATCH 129/421] add fix_live_predictions function to backtesting --- freqtrade/freqai/data_kitchen.py | 2 +- freqtrade/freqai/freqai_interface.py | 52 ++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index b99447ac9..53de00426 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1014,7 +1014,7 @@ class FreqaiDataKitchen: if self.full_df.empty: self.full_df = append_df else: - self.full_df = pd.concat([self.full_df, append_df], axis=0) + self.full_df = pd.concat([self.full_df, append_df], axis=0, ignore_index=True) def fill_predictions(self, dataframe): """ diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 59a82958b..ab0fb388a 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -305,6 +305,7 @@ class IFreqaiModel(ABC): dk.append_predictions(append_df) dk.save_backtesting_prediction(append_df) + self.backtesting_fit_live_predictions(dk) dk.fill_predictions(dataframe) return dk @@ -824,6 +825,57 @@ class IFreqaiModel(ABC): f"to {tr_train_stopts_str}, {train_it}/{total_trains} " "trains" ) + + def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen): + start_time = time.perf_counter() + fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) + if fit_live_predictions_candles: + predictions_columns = [col for col in dk.full_df.columns if ( + col.startswith("&") and + '_mean' not in col and + '_std' not in col and + col not in self.dk.data["extra_returns_per_train"]) + ] + self.dd.historic_predictions[self.dk.pair] = pd.DataFrame( + columns=dk.full_df.columns).astype(dk.full_df.dtypes) + + # for index, row in dk.full_df.iterrows(): + for index in range(len(dk.full_df)): + if index > fit_live_predictions_candles: + self.dd.historic_predictions[self.dk.pair] = ( + dk.full_df.iloc[index - fit_live_predictions_candles + 1:index + 1]) + else: + self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index + 1] + # self.dd.historic_predictions[self.dk.pair].loc[index] = row.values.tolist() + # pd.concat(self.dd.historic_predictions[self.dk.pair], row.values) + self.fit_live_predictions(self.dk, self.dk.pair) + if index > fit_live_predictions_candles: + print(index) + + if index <= fit_live_predictions_candles: + dk.full_df.at[index, "warmed_up"] = 0 + else: + dk.full_df.at[index, "warmed_up"] = 1 + + for label in predictions_columns: + if dk.full_df[label].dtype == object: + continue + if "labels_mean" in self.dk.data: + dk.full_df.at[index, f"{label}_mean"] = ( + self.dk.data["labels_mean"][label]) + if "labels_std" in self.dk.data: + dk.full_df.at[index, f"{label}_std"] = self.dk.data["labels_std"][label] + + for extra_col in self.dk.data["extra_returns_per_train"]: + dk.full_df.at[index, f"{extra_col}"] = ( + self.dk.data["extra_returns_per_train"][extra_col]) + + end_time = time.perf_counter() + logger.info(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") + + # print(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") + + return # Following methods 
which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. From 8ee95db9270376eb459a172391f800986baec3c5 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Wed, 9 Nov 2022 09:51:42 -0300 Subject: [PATCH 130/421] refactoring backtesting_fit_live_predictions function --- freqtrade/freqai/freqai_interface.py | 39 ++++++++++------------------ 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index ab0fb388a..1dc326079 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -827,37 +827,31 @@ class IFreqaiModel(ABC): ) def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen): - start_time = time.perf_counter() + """ + Apply fit_live_predictions function in backtesting with a dummy historic_predictions + :param dk: datakitchen object + """ fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) if fit_live_predictions_candles: - predictions_columns = [col for col in dk.full_df.columns if ( + label_columns = [col for col in dk.full_df.columns if ( col.startswith("&") and - '_mean' not in col and - '_std' not in col and + not (col.startswith("&") and col.endswith("_mean")) and + not (col.startswith("&") and col.endswith("_std")) and col not in self.dk.data["extra_returns_per_train"]) ] self.dd.historic_predictions[self.dk.pair] = pd.DataFrame( columns=dk.full_df.columns).astype(dk.full_df.dtypes) - # for index, row in dk.full_df.iterrows(): for index in range(len(dk.full_df)): - if index > fit_live_predictions_candles: + if index >= fit_live_predictions_candles: self.dd.historic_predictions[self.dk.pair] = ( - dk.full_df.iloc[index - fit_live_predictions_candles + 1:index + 1]) + dk.full_df.iloc[index - fit_live_predictions_candles:index]) else: - self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index + 1] - # self.dd.historic_predictions[self.dk.pair].loc[index] = row.values.tolist() - # pd.concat(self.dd.historic_predictions[self.dk.pair], row.values) + self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index] + self.fit_live_predictions(self.dk, self.dk.pair) - if index > fit_live_predictions_candles: - print(index) - - if index <= fit_live_predictions_candles: - dk.full_df.at[index, "warmed_up"] = 0 - else: - dk.full_df.at[index, "warmed_up"] = 1 - - for label in predictions_columns: + if index >= fit_live_predictions_candles: + for label in label_columns: if dk.full_df[label].dtype == object: continue if "labels_mean" in self.dk.data: @@ -869,13 +863,8 @@ class IFreqaiModel(ABC): for extra_col in self.dk.data["extra_returns_per_train"]: dk.full_df.at[index, f"{extra_col}"] = ( self.dk.data["extra_returns_per_train"][extra_col]) - - end_time = time.perf_counter() - logger.info(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") - - # print(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") - return + # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. 
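The loop introduced in the two commits above replays, candle by candle, the statistics that `fit_live_predictions()` would have produced in dry/live mode, using only the predictions available up to each candle. The snippet below is a minimal, self-contained sketch of that rolling-window idea — the label column name, window length, and the mean/std statistics are illustrative stand-ins for the default behaviour, not the actual FreqAI implementation.

```python
import numpy as np
import pandas as pd

# Illustrative stand-ins: one label column of backtest predictions and the
# window length normally taken from the "fit_live_predictions_candles" setting.
fit_live_predictions_candles = 100
full_df = pd.DataFrame({"&-s_close": np.random.normal(0, 0.01, size=500)})
full_df["&-s_close_mean"] = np.nan
full_df["&-s_close_std"] = np.nan

for idx in range(len(full_df)):
    # Emulate dry/live: only predictions made *before* this candle are visible.
    window = full_df["&-s_close"].iloc[max(0, idx - fit_live_predictions_candles):idx]
    if len(window) < fit_live_predictions_candles:
        continue  # still inside the warm-up period, nothing to fit yet
    # The default fit_live_predictions() derives per-label dispersion statistics;
    # in this reduced example that is simply a rolling mean and standard deviation.
    full_df.at[idx, "&-s_close_mean"] = window.mean()
    full_df.at[idx, "&-s_close_std"] = window.std()

print(full_df.tail())
```

The real method additionally skips non-numeric label columns and copies the per-train `extra_returns_per_train` values into each row, as the diff above shows.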
From 4f0f3e5b64cf38a96dbf8aa7fa1cb21177373d7a Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Wed, 9 Nov 2022 10:07:24 -0300 Subject: [PATCH 131/421] removed unnecessary code --- freqtrade/freqai/freqai_interface.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 1dc326079..09e965b82 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -829,6 +829,8 @@ class IFreqaiModel(ABC): def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen): """ Apply fit_live_predictions function in backtesting with a dummy historic_predictions + The loop is required to simulate dry/live operation, as it is not possible to predict + the type of logic implemented by the user. :param dk: datakitchen object """ fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) @@ -839,8 +841,6 @@ class IFreqaiModel(ABC): not (col.startswith("&") and col.endswith("_std")) and col not in self.dk.data["extra_returns_per_train"]) ] - self.dd.historic_predictions[self.dk.pair] = pd.DataFrame( - columns=dk.full_df.columns).astype(dk.full_df.dtypes) for index in range(len(dk.full_df)): if index >= fit_live_predictions_candles: From 6ff0e66ddf7115a7ffc04df36d2aad299004aac8 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 11:13:31 +0100 Subject: [PATCH 132/421] ensure strat tests are updated --- tests/strategy/test_strategy_loading.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/strategy/test_strategy_loading.py b/tests/strategy/test_strategy_loading.py index 6b831c116..5fcc75026 100644 --- a/tests/strategy/test_strategy_loading.py +++ b/tests/strategy/test_strategy_loading.py @@ -34,7 +34,7 @@ def test_search_all_strategies_no_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver._search_all_objects(directory, enum_failed=False) assert isinstance(strategies, list) - assert len(strategies) == 11 + assert len(strategies) == 12 assert isinstance(strategies[0], dict) @@ -42,10 +42,10 @@ def test_search_all_strategies_with_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver._search_all_objects(directory, enum_failed=True) assert isinstance(strategies, list) - assert len(strategies) == 12 + assert len(strategies) == 13 # with enum_failed=True search_all_objects() shall find 2 good strategies # and 1 which fails to load - assert len([x for x in strategies if x['class'] is not None]) == 11 + assert len([x for x in strategies if x['class'] is not None]) == 12 assert len([x for x in strategies if x['class'] is None]) == 1 From 6746868ea73b8f252590ce95b888a99398da470c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 11:33:03 +0100 Subject: [PATCH 133/421] store dataprovider to self instead of strategy --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++-- freqtrade/freqai/freqai_interface.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 64af31c45..f3da91b51 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -177,10 +177,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): trade_duration = 0 for trade in open_trades: if trade.pair == pair: - if self.strategy.dp._exchange is None: # type: ignore + 
if self.data_provider._exchange is None: # type: ignore logger.error('No exchange available.') else: - current_value = self.strategy.dp._exchange.get_rate( # type: ignore + current_value = self.data_provider._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index d28f00dda..406d37dc3 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -15,6 +15,7 @@ from pandas import DataFrame from freqtrade.configuration import TimeRange from freqtrade.constants import DATETIME_PRINT_FORMAT, Config +from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.exchange import timeframe_to_seconds @@ -99,7 +100,7 @@ class IFreqaiModel(ABC): self.get_corr_dataframes: bool = True self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() - self.strategy: Optional[IStrategy] = None + self.data_provider: Optional[DataProvider] = None self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) record_params(config, self.full_path) @@ -129,7 +130,7 @@ class IFreqaiModel(ABC): self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) self.dd.set_pair_dict_info(metadata) - self.strategy = strategy + self.data_provider = strategy.dp if self.live: self.inference_timer('start') From 9c6b97c67811589882123ee3d52d040590018545 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 12:01:59 +0100 Subject: [PATCH 134/421] ensure normalization acceleration methods are employed in RL --- docs/freqai-reinforcement-learning.md | 24 +++++++++---------- .../RL/BaseReinforcementLearningModel.py | 18 ++++++++++---- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 8b775e046..7179da626 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -38,8 +38,6 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from self, pair, df, tf, informative=None, set_generalized_indicators=False ): - coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) @@ -47,15 +45,15 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) # The following features are necessary for RL models - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] + informative[f"%-{pair}raw_close"] = informative["close"] + informative[f"%-{pair}raw_open"] = informative["open"] + informative[f"%-{pair}raw_high"] = informative["high"] + 
informative[f"%-{pair}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data @@ -88,10 +86,10 @@ Most of the function remains the same as for typical Regressors, however, the fu ```python # The following features are necessary for RL models - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] + informative[f"%-{pair}raw_close"] = informative["close"] + informative[f"%-{pair}raw_open"] = informative["open"] + informative[f"%-{pair}raw_high"] = informative["high"] + informative[f"%-{pair}raw_low"] = informative["low"] ``` Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the user set the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index f3da91b51..323cfd782 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -253,18 +253,26 @@ class BaseReinforcementLearningModel(IFreqaiModel): Builds the train prices and test prices for the environment. """ - coin = pair.split('/')[0] + pair = pair.replace(':', '') train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] # price data for model training and evaluation tf = self.config['timeframe'] - ohlc_list = [f'%-{coin}raw_open_{tf}', f'%-{coin}raw_low_{tf}', - f'%-{coin}raw_high_{tf}', f'%-{coin}raw_close_{tf}'] - rename_dict = {f'%-{coin}raw_open_{tf}': 'open', f'%-{coin}raw_low_{tf}': 'low', - f'%-{coin}raw_high_{tf}': ' high', f'%-{coin}raw_close_{tf}': 'close'} + ohlc_list = [f'%-{pair}raw_open_{tf}', f'%-{pair}raw_low_{tf}', + f'%-{pair}raw_high_{tf}', f'%-{pair}raw_close_{tf}'] + rename_dict = {f'%-{pair}raw_open_{tf}': 'open', f'%-{pair}raw_low_{tf}': 'low', + f'%-{pair}raw_high_{tf}': ' high', f'%-{pair}raw_close_{tf}': 'close'} prices_train = train_df.filter(ohlc_list, axis=1) + if prices_train.empty: + raise OperationalException('Reinforcement learning module didnt find the raw prices ' + 'assigned in populate_any_indicators. 
Please assign them ' + 'with:\n' + 'informative[f"%-{pair}raw_close"] = informative["close"]\n' + 'informative[f"%-{pair}raw_open"] = informative["open"]\n' + 'informative[f"%-{pair}raw_high"] = informative["high"]\n' + 'informative[f"%-{pair}raw_low"] = informative["low"]\n') prices_train.rename(columns=rename_dict, inplace=True) prices_train.reset_index(drop=True) From 7adca9735862e1f186f8a6150bcdfca42c067e2c Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 12 Nov 2022 15:39:54 +0100 Subject: [PATCH 135/421] Improve python GC behavior --- freqtrade/main.py | 3 +++ freqtrade/util/gc_setup.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 freqtrade/util/gc_setup.py diff --git a/freqtrade/main.py b/freqtrade/main.py index 754c536d0..0a46747ea 100755 --- a/freqtrade/main.py +++ b/freqtrade/main.py @@ -7,6 +7,8 @@ import logging import sys from typing import Any, List +from freqtrade.util.gc_setup import gc_set_threshold + # check min. python version if sys.version_info < (3, 8): # pragma: no cover @@ -36,6 +38,7 @@ def main(sysargv: List[str] = None) -> None: # Call subcommand. if 'func' in args: logger.info(f'freqtrade {__version__}') + gc_set_threshold() return_code = args['func'](args) else: # No subcommand was issued. diff --git a/freqtrade/util/gc_setup.py b/freqtrade/util/gc_setup.py new file mode 100644 index 000000000..a3532cbab --- /dev/null +++ b/freqtrade/util/gc_setup.py @@ -0,0 +1,18 @@ +import gc +import logging +import platform + + +logger = logging.getLogger(__name__) + + +def gc_set_threshold(): + """ + Reduce number of GC runs to improve performance (explanation video) + https://www.youtube.com/watch?v=p4Sn6UcFTOU + + """ + if platform.python_implementation() == "CPython": + # allocs, g1, g2 = gc.get_threshold() + gc.set_threshold(50_000, 500, 1000) + logger.debug("Adjusting python allocations to reduce GC runs") From 27fa9f1f4e8542f90a0361f207b0a131011eac93 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Sat, 12 Nov 2022 14:37:23 -0300 Subject: [PATCH 136/421] backtest saved dataframe from live --- freqtrade/freqai/data_kitchen.py | 109 ++++++++++++++++++++++++++- freqtrade/freqai/freqai_interface.py | 42 ++++++++++- freqtrade/freqai/utils.py | 7 +- 3 files changed, 154 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 53de00426..8e30b0aec 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -from pandas import DataFrame +from pandas import DataFrame, read_feather from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -73,6 +73,9 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() + self.backtesting_live_model_folder_path = Path() + self.backtesting_live_model_path = Path() + self.backtesting_live_model_bkp_path = Path() self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -1488,3 +1491,107 @@ class FreqaiDataKitchen: dataframe.columns = dataframe.columns.str.replace(c, "") return dataframe + + def set_backtesting_live_dataframe_folder_path( + self + ) -> None: + """ + Set live backtesting dataframe path + :param pair: current pair + """ + self.backtesting_live_model_folder_path = Path( + self.full_path / self.backtest_predictions_folder / 
"live_data") + + def set_backtesting_live_dataframe_path( + self, pair: str + ) -> None: + """ + Set live backtesting dataframe path + :param pair: current pair + """ + self.set_backtesting_live_dataframe_folder_path() + if not self.backtesting_live_model_folder_path.is_dir(): + self.backtesting_live_model_folder_path.mkdir(parents=True, exist_ok=True) + + pair_path = pair.split(":")[0].replace("/", "_").lower() + file_name = f"live_backtesting_{pair_path}.feather" + path_to_live_backtesting_file = Path(self.full_path / + self.backtesting_live_model_folder_path / + file_name) + path_to_live_backtesting_bkp_file = Path(self.full_path / + self.backtesting_live_model_folder_path / + file_name.replace(".feather", ".backup.feather")) + + self.backtesting_live_model_path = path_to_live_backtesting_file + self.backtesting_live_model_bkp_path = path_to_live_backtesting_bkp_file + + def save_backtesting_live_dataframe( + self, dataframe: DataFrame, pair: str + ) -> None: + """ + Save live backtesting dataframe to feather file format + :param dataframe: current live dataframe + :param pair: current pair + """ + self.set_backtesting_live_dataframe_path(pair) + last_row_df = dataframe.tail(1) + if self.backtesting_live_model_path.is_file(): + saved_dataframe = self.get_backtesting_live_dataframe() + concat_dataframe = pd.concat([saved_dataframe, last_row_df]) + concat_dataframe.reset_index(drop=True).to_feather( + self.backtesting_live_model_path, compression_level=9, compression='lz4') + else: + last_row_df.reset_index(drop=True).to_feather( + self.backtesting_live_model_path, compression_level=9, compression='lz4') + + shutil.copy(self.backtesting_live_model_path, self.backtesting_live_model_bkp_path) + + def get_backtesting_live_dataframe( + self + ) -> DataFrame: + """ + Get live backtesting dataframe from feather file format + return: saved dataframe from previous dry/run or live + """ + if self.backtesting_live_model_path.is_file(): + saved_dataframe = DataFrame() + try: + saved_dataframe = read_feather(self.backtesting_live_model_path) + except Exception: + saved_dataframe = read_feather(self.backtesting_live_model_bkp_path) + return saved_dataframe + else: + raise OperationalException( + "Saved pair file not found" + ) + + def get_timerange_from_backtesting_live_dataframe( + self) -> TimeRange: + """ + Returns timerange information based on a FreqAI model directory + :param models_path: FreqAI model path + + :return: timerange calculated from saved live data + """ + all_assets_start_dates = [] + all_assets_end_dates = [] + self.set_backtesting_live_dataframe_folder_path() + if not self.backtesting_live_model_folder_path.is_dir(): + raise OperationalException( + 'Saved live data not found. 
Saved lived data is required ' + 'to run backtest with the freqai-backtest-live-models option ' + 'and save_live_data_backtest config option as true' + ) + for file_in_dir in self.backtesting_live_model_folder_path.iterdir(): + if file_in_dir.is_file() and "backup" not in file_in_dir.name: + saved_dataframe = read_feather(file_in_dir) + all_assets_start_dates.append(saved_dataframe.date.min()) + all_assets_end_dates.append(saved_dataframe.date.max()) + start_date = min(all_assets_start_dates) + end_date = min(all_assets_end_dates) + # add 1 day to string timerange to ensure BT module will load all dataframe data + end_date = end_date + timedelta(days=1) + backtesting_timerange = TimeRange( + 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) + ) + return backtesting_timerange diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 09e965b82..47d75dfaa 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -67,6 +67,11 @@ class IFreqaiModel(ABC): self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True) if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') + self.save_live_data_backtest: bool = self.freqai_info.get( + "save_live_data_backtest", True) + if self.save_live_data_backtest: + logger.info('Live configured to save data for backtest.') + self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) # set current candle to arbitrary historical date self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc) @@ -147,12 +152,20 @@ class IFreqaiModel(ABC): dataframe = self.dk.use_strategy_to_populate_indicators( strategy, prediction_dataframe=dataframe, pair=metadata["pair"] ) - dk = self.start_backtesting(dataframe, metadata, self.dk) + if not self.save_live_data_backtest: + dk = self.start_backtesting(dataframe, metadata, self.dk) + dataframe = dk.remove_features_from_df(dk.return_dataframe) + else: + dk = self.start_backtesting_from_live_saved_files( + dataframe, metadata, self.dk) + dataframe = dk.return_dataframe - dataframe = dk.remove_features_from_df(dk.return_dataframe) self.clean_up() if self.live: self.inference_timer('stop', metadata["pair"]) + if self.save_live_data_backtest: + dk.save_backtesting_live_dataframe(dataframe, metadata["pair"]) + return dataframe def clean_up(self): @@ -310,6 +323,31 @@ class IFreqaiModel(ABC): return dk + def start_backtesting_from_live_saved_files( + self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen + ) -> FreqaiDataKitchen: + """ + :param dataframe: DataFrame = strategy passed dataframe + :param metadata: Dict = pair metadata + :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only + :return: + FreqaiDataKitchen = Data management/analysis tool associated to present pair only + """ + pair = metadata["pair"] + dk.return_dataframe = dataframe + + dk.return_dataframe = dataframe + self.dk.set_backtesting_live_dataframe_path(pair) + saved_dataframe = self.dk.get_backtesting_live_dataframe() + columns_to_drop = list(set(dk.return_dataframe.columns).difference( + ["date", "open", "high", "low", "close", "volume"])) + saved_dataframe = saved_dataframe.drop( + columns=["open", "high", "low", "close", "volume"]) + dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop)) + dk.return_dataframe = pd.merge(dk.return_dataframe, saved_dataframe, how='left', on='date') + # 
dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) + return dk + def start_live( self, dataframe: DataFrame, metadata: dict, strategy: IStrategy, dk: FreqaiDataKitchen ) -> FreqaiDataKitchen: diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index e854bcf0b..ad38a339b 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -229,7 +229,12 @@ def get_timerange_backtest_live_models(config: Config) -> str: """ dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) - timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) + timerange: TimeRange = TimeRange() + if not config.get("save_live_data_backtest", True): + timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) + else: + timerange = dk.get_timerange_from_backtesting_live_dataframe() + start_date = datetime.fromtimestamp(timerange.startts, tz=timezone.utc) end_date = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc) tr = f"{start_date.strftime('%Y%m%d')}-{end_date.strftime('%Y%m%d')}" From e71a8b8ac11faab3fdfc504ea1b26f76a9f8c203 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 18:46:48 +0100 Subject: [PATCH 137/421] add ability to integrate state info or not, and prevent state info integration during backtesting --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 15 ---------- freqtrade/freqai/RL/BaseEnvironment.py | 28 +++++++++++++------ .../RL/BaseReinforcementLearningModel.py | 20 ++++++++++--- 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 80543bf72..663ecc77e 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -2,9 +2,7 @@ import logging from enum import Enum import numpy as np -import pandas as pd from gym import spaces -from pandas import DataFrame from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions @@ -145,19 +143,6 @@ class Base5ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info - def _get_observation(self): - features_window = self.signal_features[( - self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 3)), - columns=['current_profit_pct', 'position', 'trade_duration'], - index=features_window.index) - - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state['trade_duration'] = self.get_trade_duration() - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state - def get_trade_duration(self): if self._last_trade_tick is None: return 0 diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 6474483c6..6633bf3e8 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -35,6 +35,7 @@ class BaseEnvironment(gym.Env): id: str = 'baseenv-1', seed: int = 1, config: dict = {}): self.rl_config = config['freqai']['rl_config'] + self.add_state_info = self.rl_config.get('add_state_info', False) self.id = id self.seed(seed) self.reset_env(df, prices, window_size, reward_kwargs, starting_point) @@ -58,7 +59,11 @@ class BaseEnvironment(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 3) + if self.add_state_info: + self.total_features = 
self.signal_features.shape[1] + 3 + else: + self.total_features = self.signal_features.shape[1] + self.shape = (window_size, self.total_features) self.set_action_space() self.observation_space = spaces.Box( low=-1, high=1, shape=self.shape, dtype=np.float32) @@ -126,15 +131,20 @@ class BaseEnvironment(gym.Env): """ features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 3)), - columns=['current_profit_pct', 'position', 'trade_duration'], - index=features_window.index) + if self.add_state_info: + features_and_state = DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', + 'position', + 'trade_duration'], + index=features_window.index) - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state['trade_duration'] = self.get_trade_duration() - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state + else: + return features_window def get_trade_duration(self): """ diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 323cfd782..885918ffb 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -234,11 +234,12 @@ class BaseReinforcementLearningModel(IFreqaiModel): output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): - market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] - observations['current_profit_pct'] = current_profit - observations['position'] = market_side - observations['trade_duration'] = trade_duration + if self.live: # self.guard_state_info_if_backtest(): + market_side, current_profit, trade_duration = self.get_state_info(dk.pair) + observations['current_profit_pct'] = current_profit + observations['position'] = market_side + observations['trade_duration'] = trade_duration res, _ = model.predict(observations, deterministic=True) return res @@ -246,6 +247,17 @@ class BaseReinforcementLearningModel(IFreqaiModel): return output + # def guard_state_info_if_backtest(self): + # """ + # Ensure that backtesting mode doesnt try to use state information. 
+ # """ + # if self.rl_config('add_state_info', False) and not self.live: + # logger.warning('Backtesting with state info is currently unavailable ' + # 'turning it off.') + # self.rl_config['add_state_info'] = False + + # return not self.rl_config['add_state_info'] + def build_ohlc_price_dataframes(self, data_dictionary: dict, pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame, DataFrame]: From 259f87bd40bc8a82e214174b2e57326aa900aca9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 19:01:40 +0100 Subject: [PATCH 138/421] fix rl test; --- tests/strategy/strats/freqai_rl_test_strat.py | 29 +++++-------------- 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/tests/strategy/strats/freqai_rl_test_strat.py b/tests/strategy/strats/freqai_rl_test_strat.py index 7b36dc6be..8d507a6da 100644 --- a/tests/strategy/strats/freqai_rl_test_strat.py +++ b/tests/strategy/strats/freqai_rl_test_strat.py @@ -38,25 +38,10 @@ class freqai_rl_test_strat(IStrategy): startup_candle_count: int = 30 can_short = False - def informative_pairs(self): - whitelist_pairs = self.dp.current_whitelist() - corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] - informative_pairs = [] - for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: - for pair in whitelist_pairs: - informative_pairs.append((pair, tf)) - for pair in corr_pairs: - if pair in whitelist_pairs: - continue # avoid duplication - informative_pairs.append((pair, tf)) - return informative_pairs - def populate_any_indicators( self, pair, df, tf, informative=None, set_generalized_indicators=False ): - coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) @@ -64,16 +49,16 @@ class freqai_rl_test_strat(IStrategy): for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) # FIXME: add these outside the user strategy? # The following columns are necessary for RL models. 
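        # Why these must stay untransformed (a simplified sketch, not the exact
        # environment code): the RL training environment computes fees and unrealized
        # profit on real prices, roughly:
        #
        #     entry_price = raw_open_at_entry * (1 + fee)   # long entry including fee
        #     exit_price = raw_open_now * (1 - fee)          # long exit including fee
        #     unrealized_profit = (exit_price - entry_price) / entry_price
        #
        # Normalized or shifted feature columns cannot be used for that calculation,
        # so the raw_* columns defined below are added as features and later filtered
        # back out into an open/high/low/close price dataframe by the RL model.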
- informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] + informative[f"%-{pair}raw_close"] = informative["close"] + informative[f"%-{pair}raw_open"] = informative["open"] + informative[f"%-{pair}raw_high"] = informative["high"] + informative[f"%-{pair}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data From 81f800a79bf07b0778744faa9574198faf92ec79 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 13:41:17 +0100 Subject: [PATCH 139/421] switch to using FT calc_profi_pct, reverse entry/exit fees --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 1 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 -- freqtrade/freqai/RL/BaseEnvironment.py | 15 +++++++-------- .../freqai/RL/BaseReinforcementLearningModel.py | 13 ++++--------- 4 files changed, 12 insertions(+), 19 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index b4fe78b71..0c719ea92 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -74,6 +74,7 @@ class Base4ActionRLEnv(BaseEnvironment): self._last_trade_tick = self._current_tick elif action == Actions.Exit.value: self._position = Positions.Neutral + self._update_total_profit() trade_type = "neutral" self._last_trade_tick = None else: diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 663ecc77e..b6ebcf703 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -75,8 +75,6 @@ class Base5ActionRLEnv(BaseEnvironment): if self._current_tick == self._end_tick: self._done = True - self.update_portfolio_log_returns(action) - self._update_unrealized_total_profit() step_reward = self.calculate_reward(action) self.total_reward += step_reward diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 6633bf3e8..3b56fc2c4 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -165,12 +165,12 @@ class BaseEnvironment(gym.Env): if self._position == Positions.Neutral: return 0. elif self._position == Positions.Short: - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) return (current_price - last_trade_price) / last_trade_price else: return 0. @@ -210,9 +210,8 @@ class BaseEnvironment(gym.Env): """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. - :params: - action: int = The action made by the agent for the current candle. - :returns: + :param action: int = The action made by the agent for the current candle. 
+ :return: float = the reward to give to the agent for current step (used for optimization of weights in NN) """ @@ -234,7 +233,7 @@ class BaseEnvironment(gym.Env): def _update_total_profit(self): pnl = self.get_unrealized_profit() if self.compound_trades: - # assumes unite stake and compounding + # assumes unitestake and compounding self._total_profit = self._total_profit * (1 + pnl) else: # assumes unit stake and no compounding diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 885918ffb..85756ad8f 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -180,17 +180,12 @@ class BaseReinforcementLearningModel(IFreqaiModel): if self.data_provider._exchange is None: # type: ignore logger.error('No exchange available.') else: - current_value = self.data_provider._exchange.get_rate( # type: ignore + current_rate = self.data_provider._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) - openrate = trade.open_rate + now = datetime.now(timezone.utc).timestamp() - trade_duration = int((now - trade.open_date.timestamp()) / self.base_tf_seconds) - if 'long' in str(trade.enter_tag): - market_side = 1 - current_profit = (current_value - openrate) / openrate - else: - market_side = 0 - current_profit = (openrate - current_value) / openrate + trade_duration = int((now - trade.open_date_utc) / self.base_tf_seconds) + current_profit = trade.calc_profit_ratio(current_rate) return market_side, current_profit, int(trade_duration) From af9e4005626c519015f5edd37c6101e0b22305f7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 15:31:37 +0100 Subject: [PATCH 140/421] add test coverage, fix bug in base environment. Ensure proper fee is used. --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 1 - freqtrade/freqai/RL/BaseEnvironment.py | 34 +++++++++++-- .../RL/BaseReinforcementLearningModel.py | 37 +++++++------- tests/freqai/test_freqai_interface.py | 49 +++++++++++++++++-- 4 files changed, 92 insertions(+), 29 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index b6ebcf703..0d101ee9c 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -148,7 +148,6 @@ class Base5ActionRLEnv(BaseEnvironment): return self._current_tick - self._last_trade_tick def is_tradesignal(self, action: int): - # trade signal """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 3b56fc2c4..bb8cd992c 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -10,6 +10,8 @@ from gym import spaces from gym.utils import seeding from pandas import DataFrame +from freqtrade.data.dataprovider import DataProvider + logger = logging.getLogger(__name__) @@ -32,8 +34,21 @@ class BaseEnvironment(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - + id: str = 'baseenv-1', seed: int = 1, config: dict = {}, + dp: Optional[DataProvider] = None): + """ + Initializes the training/eval environment. 
+ :param df: dataframe of features + :param prices: dataframe of prices to be used in the training environment + :param window_size: size of window (temporal) to pass to the agent + :param reward_kwargs: extra config settings assigned by user in `rl_config` + :param starting_point: start at edge of window or not + :param id: string id of the environment (used in backend for multiprocessed env) + :param seed: Sets the seed of the environment higher in the gym.Env object + :param config: Typical user configuration file + :param dp: dataprovider from freqtrade + """ + self.config = config self.rl_config = config['freqai']['rl_config'] self.add_state_info = self.rl_config.get('add_state_info', False) self.id = id @@ -41,12 +56,23 @@ class BaseEnvironment(gym.Env): self.reset_env(df, prices, window_size, reward_kwargs, starting_point) self.max_drawdown = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8) self.compound_trades = config['stake_amount'] == 'unlimited' + if self.config.get('fee', None) is not None: + self.fee = self.config['fee'] + elif dp is not None: + self.fee = self.dp.exchange.get_fee(symbol=dp.current_whitelist()[0]) + else: + self.fee = 0.0015 def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): """ Resets the environment when the agent fails (in our case, if the drawdown exceeds the user set max_training_drawdown_pct) + :param df: dataframe of features + :param prices: dataframe of prices to be used in the training environment + :param window_size: size of window (temporal) to pass to the agent + :param reward_kwargs: extra config settings assigned by user in `rl_config` + :param starting_point: start at edge of window or not """ self.df = df self.signal_features = self.df @@ -56,8 +82,6 @@ class BaseEnvironment(gym.Env): self.rr = reward_kwargs["rr"] self.profit_aim = reward_kwargs["profit_aim"] - self.fee = 0.0015 - # # spaces if self.add_state_info: self.total_features = self.signal_features.shape[1] + 3 @@ -233,7 +257,7 @@ class BaseEnvironment(gym.Env): def _update_total_profit(self): pnl = self.get_unrealized_profit() if self.compound_trades: - # assumes unitestake and compounding + # assumes unit stake and compounding self._total_profit = self._total_profit * (1 + pnl) else: # assumes unit stake and no compounding diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 85756ad8f..a8c79ce6e 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -74,10 +74,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.ft_params.update({'use_SVM_to_remove_outliers': False}) logger.warning('User tried to use SVM with RL. Deactivating SVM.') if self.ft_params.get('use_DBSCAN_to_remove_outliers', False): - self.ft_params.update({'use_SVM_to_remove_outliers': False}) + self.ft_params.update({'use_DBSCAN_to_remove_outliers': False}) logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.') if self.freqai_info['data_split_parameters'].get('shuffle', False): - self.freqai_info['data_split_parameters'].update('shuffle', False) + self.freqai_info['data_split_parameters'].update({'shuffle': False}) logger.warning('User tried to shuffle training data. 
Setting shuffle to False') def train( @@ -141,11 +141,18 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config)) + self.train_env = self.MyRLEnv(df=train_df, + prices=prices_train, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, + config=self.config, + dp=self.data_provider) + self.eval_env = Monitor(self.MyRLEnv(df=test_df, + prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, + config=self.config, + dp=self.data_provider)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -179,12 +186,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): if trade.pair == pair: if self.data_provider._exchange is None: # type: ignore logger.error('No exchange available.') + return 0, 0, 0 else: current_rate = self.data_provider._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) now = datetime.now(timezone.utc).timestamp() - trade_duration = int((now - trade.open_date_utc) / self.base_tf_seconds) + trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds) current_profit = trade.calc_profit_ratio(current_rate) return market_side, current_profit, int(trade_duration) @@ -230,7 +238,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def _predict(window): observations = dataframe.iloc[window.index] - if self.live: # self.guard_state_info_if_backtest(): + if self.live and self.rl_config('add_state_info', False): market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations['current_profit_pct'] = current_profit observations['position'] = market_side @@ -242,17 +250,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return output - # def guard_state_info_if_backtest(self): - # """ - # Ensure that backtesting mode doesnt try to use state information. 
- # """ - # if self.rl_config('add_state_info', False) and not self.live: - # logger.warning('Backtesting with state info is currently unavailable ' - # 'turning it off.') - # self.rl_config['add_state_info'] = False - - # return not self.rl_config['add_state_info'] - def build_ohlc_price_dataframes(self, data_dictionary: dict, pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame, DataFrame]: diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 6ed9dac3d..08f33add9 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -13,7 +13,7 @@ from freqtrade.freqai.utils import download_all_data_for_training, get_required_ from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager -from tests.conftest import get_patched_exchange, log_has_re +from tests.conftest import create_mock_trades, get_patched_exchange, log_has_re from tests.freqai.conftest import get_patched_freqai_strategy, make_rl_config @@ -32,7 +32,7 @@ def is_mac() -> bool: ('XGBoostRegressor', False, True), ('XGBoostRFRegressor', False, False), ('CatboostRegressor', False, False), - ('ReinforcementLearner', False, False), + ('ReinforcementLearner', False, True), ('ReinforcementLearner_multiproc', False, False), ('ReinforcementLearner_test_4ac', False, False) ]) @@ -40,7 +40,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") - if is_mac(): + if is_mac() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") model_save_ext = 'joblib' @@ -53,6 +53,9 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, if 'ReinforcementLearner' in model: model_save_ext = 'zip' freqai_conf = make_rl_config(freqai_conf) + # test the RL guardrails + freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) + freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") @@ -497,3 +500,43 @@ def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir): "Downloading", caplog, ) + + +@pytest.mark.usefixtures("init_persistence") +@pytest.mark.parametrize('dp_exists', [(False), (True)]) +def test_get_state_info(mocker, freqai_conf, dp_exists, caplog, tickers): + + if is_mac(): + pytest.skip("Reinforcement learning module not available on intel based Mac OS") + + freqai_conf.update({"freqaimodel": "ReinforcementLearner"}) + freqai_conf.update({"timerange": "20180110-20180130"}) + freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf = make_rl_config(freqai_conf) + freqai_conf['entry_pricing']['price_side'] = 'same' + freqai_conf['exit_pricing']['price_side'] = 'same' + + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + ticker_mock = MagicMock(return_value=tickers()['ETH/BTC']) + mocker.patch("freqtrade.exchange.Exchange.fetch_ticker", ticker_mock) + strategy.dp = DataProvider(freqai_conf, exchange) + + if not dp_exists: + strategy.dp._exchange = None + + strategy.freqai_info = freqai_conf.get("freqai", {}) + freqai = strategy.freqai + freqai.data_provider = strategy.dp + freqai.live = True + + Trade.use_db = True + 
create_mock_trades(MagicMock(return_value=0.0025), False, True) + freqai.get_state_info("ADA/BTC") + freqai.get_state_info("ETH/BTC") + + if not dp_exists: + assert log_has_re( + "No exchange available", + caplog, + ) From 3c249ba9940d9ea4b842ae6f31106b729bf43f67 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 16:11:14 +0100 Subject: [PATCH 141/421] add doc for data_kitchen_thread_count` --- docs/freqai-parameter-table.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 7d00acde8..df3dd5b53 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -19,6 +19,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models.
**Datatype:** Boolean.
Default: `False`. | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`. | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file.
**Datatype:** Boolean.
Default: `False` +| `data_kitchen_thread_count` |
Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If you do not set it (default), FreqAI will use the maximum number of threads minus 2 (leaving 1 physical core available for the Freqtrade bot and FreqUI).
**Datatype:** Positive integer. | | **Feature parameters** | `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md).
**Datatype:** Dictionary. | `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset.
**Datatype:** List of timeframes (strings). From 388ca2120030bfe375b91826fd11f55dc406b443 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 16:56:31 +0100 Subject: [PATCH 142/421] update docs, fix bug in environment --- docs/freqai-parameter-table.md | 1 + docs/freqai-reinforcement-learning.md | 17 ++++++++++++----- freqtrade/constants.py | 19 +++++++++++++++++++ freqtrade/freqai/RL/BaseEnvironment.py | 2 +- 4 files changed, 33 insertions(+), 6 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index df3dd5b53..925609270 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -58,6 +58,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training.
**Datatype:** float.
Default: 0.8 | `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not).
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. +| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. | | **Extraneous parameters** | `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. | `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: `2`. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 7179da626..693918051 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -16,7 +16,10 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new ### The RL interface -With the current framework, we aim to expose the training environment to the user via the common "prediction model" file (i.e. CatboostClassifier, LightGBMRegressor, etc.). Users inherit our base environment in this file, which allows them to override as much or as little of the environment as they wish. +With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv`: + + + We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. @@ -49,7 +52,7 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) - # The following features are necessary for RL models + # The following raw price values are necessary for RL models informative[f"%-{pair}raw_close"] = informative["close"] informative[f"%-{pair}raw_open"] = informative["open"] informative[f"%-{pair}raw_high"] = informative["high"] @@ -131,11 +134,12 @@ It is important to consider that `&-action` depends on which environment they ch ## Configuring the Reinforcement Learner -In order to configure the `Reinforcement Learner` the following dictionary to their `freqai` config: +In order to configure the `Reinforcement Learner` the following dictionary must exist in the `freqai` config: ```json "rl_config": { "train_cycles": 25, + "add_state_info": true, "max_trade_duration_candles": 300, "max_training_drawdown_pct": 0.02, "cpu_count": 8, @@ -148,11 +152,14 @@ In order to configure the `Reinforcement Learner` the following dictionary to th } ``` -Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environemtn to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). +Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). + +!!! Note + Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. 
For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html). ## Creating the reward -As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: +As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. 
Below we show examples of how these rewards are all calculated: ```python class MyRLEnv(Base5ActionRLEnv): diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 428f16586..e947b49e0 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -578,6 +578,25 @@ CONF_SCHEMA = { "model_training_parameters": { "type": "object" }, + "rl_config": { + "type": "object", + "properties": { + "train_cycles": {"type": "integer"}, + "max_trade_duration_candles": {"type": "integer"}, + "add_state_info": {"type": "boolean", "default": False}, + "max_training_drawdown_pct": {"type": "number", "default": 0.02}, + "cpu_count": {"type": "integer", "default": 1}, + "model_type": {"type": "string", "default": "PPO"}, + "policy_type": {"type": "string", "default": "MlpPolicy"}, + "model_reward_parameters": { + "type": "object", + "properties": { + "rr": {"type": "number", "default": 1}, + "profit_aim": {"type": "number", "default": 0.025} + } + } + }, + }, }, "required": [ "enabled", diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index bb8cd992c..6853377cb 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -59,7 +59,7 @@ class BaseEnvironment(gym.Env): if self.config.get('fee', None) is not None: self.fee = self.config['fee'] elif dp is not None: - self.fee = self.dp.exchange.get_fee(symbol=dp.current_whitelist()[0]) + self.fee = dp._exchange.get_fee(symbol=dp.current_whitelist()[0]) # type: ignore else: self.fee = 0.0015 From f8f553ec14b3793a5ae09d3b907eebeb4b253e5c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 16:58:36 +0100 Subject: [PATCH 143/421] remove references to "the user" --- docs/freqai-reinforcement-learning.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 693918051..9cd4f7ca3 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the user set the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: @@ -159,7 +159,7 @@ Parameter details can be found [here](freqai-parameter-table.md), but in general ## Creating the reward -As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. 
Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: +As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: ```python class MyRLEnv(Base5ActionRLEnv): @@ -214,6 +214,6 @@ cd freqtrade tensorboard --logdir user_data/models/unique-id ``` -where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). +where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). ![tensorboard](assets/tensorboard.jpg) From 90f168d1ff28f50ee8299e2439e7e28a2152bbc2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:06:06 +0100 Subject: [PATCH 144/421] remove more user references. cleanup dataprovider --- docs/freqai-parameter-table.md | 4 ++-- docs/freqai-reinforcement-learning.md | 7 ++----- freqtrade/freqai/freqai_interface.py | 1 + 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 925609270..4009a280d 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -52,12 +52,12 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary. | `train_cycles` | Training time steps will be set based on the `train_cycles * number of training data points`.
**Datatype:** Integer. | `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. -| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int. +| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the customizable `calculate_reward()` function.
**Datatype:** int. | `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3.
**Datatype:** string. | `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training.
**Datatype:** float.
Default: 0.8 | `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on whether `ReinforcementLearner_multiproc` is selected).
**Datatype:** int. -| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. +| `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** dictionary. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, and trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. | | **Extraneous parameters** | `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 9cd4f7ca3..77cc38cba 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -10,16 +10,13 @@ Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. -A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live. *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* +A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors. ### The RL interface -With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv`: - - - +With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward). We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. 
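To make this interface concrete, the following is a minimal sketch of such a user class: a thin subclass of the shipped `ReinforcementLearner` with a nested `MyRLEnv` that overrides only the reward. The file/class name `MyCoolRLModel` is a placeholder, and the import paths, the `Actions`/`Positions` enums and helpers such as `_is_valid()`, `get_unrealized_profit()` and `self.rl_config` are assumptions based on the shipped `prediction_models/ReinforcementLearner.py` and base environments - verify them against the actual code before use:

```python
# user_data/freqaimodels/MyCoolRLModel.py - a minimal sketch, not the shipped implementation.
# Import paths, enum names and helper methods are assumptions; check them against
# freqtrade/freqai/RL/Base5ActionRLEnv.py and prediction_models/ReinforcementLearner.py.
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyCoolRLModel(ReinforcementLearner):
    """
    Reuses the fit()/predict() plumbing of the shipped ReinforcementLearner and
    only customizes the environment, i.e. the reward the agent is trained on.
    """

    class MyRLEnv(Base5ActionRLEnv):

        def calculate_reward(self, action: int) -> float:
            # Strongly discourage actions that are invalid for the current position
            if not self._is_valid(action):
                return -2

            pnl = self.get_unrealized_profit()

            # Reward closing a long in profit, penalize closing at a loss
            # (negative pnl yields a negative reward)
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                return float(pnl * 2)

            # Mild penalty for sitting in a trade longer than the configured maximum
            max_duration = self.rl_config.get("max_trade_duration_candles", 300)
            if (self._position != Positions.Neutral
                    and self._current_tick - self._last_trade_tick > max_duration):
                return -1

            # Neutral steps neither reward nor punish the agent
            return 0.
```

Started with the usual `--freqaimodel MyCoolRLModel` flag, the agent would then be trained against this reward instead of the default one.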
diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 406d37dc3..2e455a347 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -168,6 +168,7 @@ class IFreqaiModel(ABC): """ self.model = None self.dk = None + self.data_provider = None def _on_stop(self): """ From b421521be34c1bbef5e9203eebfcd9ba20aaef28 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:12:17 +0100 Subject: [PATCH 145/421] help default ReinforcementLearner users by assigning the model_type automatically --- freqtrade/freqai/data_drawer.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index d6ad3047d..d41675f89 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -99,10 +99,9 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) - if 'rl_config' in self.freqai_info: - self.model_type = 'stable_baselines' - logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' - ' to save models.') + if 'Reinforcement' in self.config['freqaimodel']: + logger.warning('User passed a ReinforcementLearner model, FreqAI will ' + 'now use stable_baselines3 to save models.') else: self.model_type = self.freqai_info.get('model_save_type', 'joblib') From 96fafb7f5690c6a07bdeef6959a8e92c2ddebef0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:14:47 +0100 Subject: [PATCH 146/421] remove limit_ram_use --- freqtrade/freqai/data_drawer.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index d41675f89..590439697 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -98,7 +98,6 @@ class FreqaiDataDrawer: self.empty_pair_dict: pair_info = { "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} - self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) if 'Reinforcement' in self.config['freqaimodel']: logger.warning('User passed a ReinforcementLearner model, FreqAI will ' 'now use stable_baselines3 to save models.') @@ -514,10 +513,10 @@ class FreqaiDataDrawer: dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb") ) - if not self.limit_ram_use: - self.model_dictionary[coin] = model + self.model_dictionary[coin] = model self.pair_dict[coin]["model_filename"] = dk.model_filename self.pair_dict[coin]["data_path"] = str(dk.data_path) + if coin not in self.meta_data_dictionary: self.meta_data_dictionary[coin] = {} self.meta_data_dictionary[coin]["train_df"] = dk.data_dictionary["train_features"] @@ -565,7 +564,7 @@ class FreqaiDataDrawer: dk.label_list = dk.data["label_list"] # try to access model in memory instead of loading object from disk to save time - if dk.live and coin in self.model_dictionary and not self.limit_ram_use: + if dk.live and coin in self.model_dictionary: model = self.model_dictionary[coin] elif self.model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") @@ -587,7 +586,7 @@ class FreqaiDataDrawer: ) # load it into ram if it was loaded from disk - if coin not in self.model_dictionary and not self.limit_ram_use: + if coin not in self.model_dictionary: self.model_dictionary[coin] = model if 
self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: From c76afc255a838a9d7603b7bdef97ded54e3d3bf9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:26:11 +0100 Subject: [PATCH 147/421] explain how to choose environments, and how to customize them --- docs/freqai-reinforcement-learning.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 77cc38cba..e7c3576fc 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -214,3 +214,13 @@ tensorboard --logdir user_data/models/unique-id where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). ![tensorboard](assets/tensorboard.jpg) + + +### Choosing a base environment + +FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: + +* the actions available in the `calculate_reward` +* the actions consumed by the user strategy + +Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. From c8d3e5771235598b51cfe969cdf67866cbe01612 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:30:56 +0100 Subject: [PATCH 148/421] add note that these environments are designed for short-long bots only. --- docs/freqai-reinforcement-learning.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index e7c3576fc..c4e70130b 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -224,3 +224,6 @@ FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5Action * the actions consumed by the user strategy Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. 
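For the second point - the actions consumed by the strategy - a rough sketch of the strategy side for a `Base5ActionRLEnv`-trained model is shown below. The integer mapping (0 = neutral, 1 = enter long, 2 = exit long, 3 = enter short, 4 = exit short) and the `MyRLStrategy` class are assumptions for illustration and must be aligned with whichever base environment is actually selected; the sketch also omits the feature engineering methods a real FreqAI strategy requires:

```python
# Sketch only: how a strategy could consume the agent's integer actions from a
# Base5ActionRLEnv-trained model. The action numbering is an assumption.
from functools import reduce

from pandas import DataFrame

from freqtrade.strategy import IStrategy


class MyRLStrategy(IStrategy):
    """Fragment - populate_indicators()/feature engineering omitted for brevity."""
    timeframe = "5m"
    stoploss = -0.05
    can_short = True
    process_only_new_candles = True

    def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        enter_long = [df["do_predict"] == 1, df["&-action"] == 1]   # 1 = enter long (assumed)
        df.loc[reduce(lambda x, y: x & y, enter_long),
               ["enter_long", "enter_tag"]] = (1, "long")

        enter_short = [df["do_predict"] == 1, df["&-action"] == 3]  # 3 = enter short (assumed)
        df.loc[reduce(lambda x, y: x & y, enter_short),
               ["enter_short", "enter_tag"]] = (1, "short")
        return df

    def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        df.loc[(df["do_predict"] == 1) & (df["&-action"] == 2), "exit_long"] = 1   # 2 = exit long
        df.loc[(df["do_predict"] == 1) & (df["&-action"] == 4), "exit_short"] = 1  # 4 = exit short
        return df
```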
+ +!!! Note + FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). From 6394ef45589d4724134c3e1f47f150dcc7a38ce4 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:43:52 +0100 Subject: [PATCH 149/421] fix docstrings --- .../RL/BaseReinforcementLearningModel.py | 19 ++++++++----------- .../prediction_models/ReinforcementLearner.py | 14 ++++++-------- .../ReinforcementLearner_multiproc.py | 7 +++---- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a8c79ce6e..d0ddce294 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -130,13 +130,12 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk: FreqaiDataKitchen): """ User can override this if they are using a custom MyRLEnv - :params: - data_dictionary: dict = common data dictionary containing train and test + :param data_dictionary: dict = common data dictionary containing train and test features/labels/weights. - prices_train/test: DataFrame = dataframe comprised of the prices to be used in the + :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in the environment during training or testing - dk: FreqaiDataKitchen = the datakitchen for the current pair + :param dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -229,10 +228,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk: FreqaiDataKitchen, model: Any) -> DataFrame: """ A helper function to make predictions in the Reinforcement learning module. - :params: - dataframe: DataFrame = the dataframe of features to make the predictions on - dk: FreqaiDatakitchen = data kitchen for the current pair - model: Any = the trained model used to inference the features. + :param dataframe: DataFrame = the dataframe of features to make the predictions on + :param dk: FreqaiDatakitchen = data kitchen for the current pair + :param model: Any = the trained model used to inference the features. """ output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) @@ -322,9 +320,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. - :params: - action: int = The action made by the agent for the current candle. - :returns: + :param action: int = The action made by the agent for the current candle. 
+ :return: float = the reward to give to the agent for current step (used for optimization of weights in NN) """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 4bf990172..063af5ff5 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -20,12 +20,11 @@ class ReinforcementLearner(BaseReinforcementLearningModel): def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): """ User customizable fit method - :params: - data_dictionary: dict = common data dictionary containing all train/test + :param data_dictionary: dict = common data dictionary containing all train/test features/labels/weights. - dk: FreqaiDatakitchen = data kitchen for current pair. - :returns: - model: Any = trained model to be used for inference in dry/live/backtesting + :param dk: FreqaiDatakitchen = data kitchen for current pair. + :return: + model Any = trained model to be used for inference in dry/live/backtesting """ train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) @@ -69,9 +68,8 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. - :params: - action: int = The action made by the agent for the current candle. - :returns: + :param action: int = The action made by the agent for the current candle. + :return: float = the reward to give to the agent for current step (used for optimization of weights in NN) """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 41345b967..baba16066 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -61,13 +61,12 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): dk: FreqaiDataKitchen): """ User can override this if they are using a custom MyRLEnv - :params: - data_dictionary: dict = common data dictionary containing train and test + :param data_dictionary: dict = common data dictionary containing train and test features/labels/weights. 
- prices_train/test: DataFrame = dataframe comprised of the prices to be used in + :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in the environment during training or testing - dk: FreqaiDataKitchen = the datakitchen for the current pair + :param dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] From bf4d5b432a19a090c1e09eb499c92d56258a7004 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 18:50:25 +0100 Subject: [PATCH 150/421] ensure model_type is defined --- freqtrade/freqai/data_drawer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 590439697..96b481074 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -99,6 +99,7 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} if 'Reinforcement' in self.config['freqaimodel']: + self.model_type = 'stable_baselines' logger.warning('User passed a ReinforcementLearner model, FreqAI will ' 'now use stable_baselines3 to save models.') else: From 659c8c237f7a7e30ad0929fed448c449a01fb2bf Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 14 Nov 2022 20:27:45 -0700 Subject: [PATCH 151/421] initial revision --- freqtrade/rpc/api_server/api_ws.py | 170 +++++--- freqtrade/rpc/api_server/deps.py | 4 +- freqtrade/rpc/api_server/webserver.py | 162 +++++--- freqtrade/rpc/api_server/ws/__init__.py | 3 +- freqtrade/rpc/api_server/ws/channel.py | 365 ++++++++++++------ freqtrade/rpc/api_server/ws/message_stream.py | 23 ++ freqtrade/rpc/api_server/ws/serializer.py | 8 +- 7 files changed, 494 insertions(+), 241 deletions(-) create mode 100644 freqtrade/rpc/api_server/ws/message_stream.py diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 785773b39..a9b88aadb 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -1,16 +1,17 @@ +import asyncio import logging from typing import Any, Dict -from fastapi import APIRouter, Depends, WebSocketDisconnect -from fastapi.websockets import WebSocket, WebSocketState +from fastapi import APIRouter, Depends +from fastapi.websockets import WebSocket, WebSocketDisconnect from pydantic import ValidationError -from websockets.exceptions import WebSocketException +from websockets.exceptions import ConnectionClosed from freqtrade.enums import RPCMessageType, RPCRequestType from freqtrade.rpc.api_server.api_auth import validate_ws_token -from freqtrade.rpc.api_server.deps import get_channel_manager, get_rpc +from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc from freqtrade.rpc.api_server.ws import WebSocketChannel -from freqtrade.rpc.api_server.ws.channel import ChannelManager +from freqtrade.rpc.api_server.ws.message_stream import MessageStream from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema, WSRequestSchema, WSWhitelistMessage) from freqtrade.rpc.rpc import RPC @@ -22,23 +23,63 @@ logger = logging.getLogger(__name__) router = APIRouter() -async def is_websocket_alive(ws: WebSocket) -> bool: +# async def is_websocket_alive(ws: WebSocket) -> bool: +# """ +# Check if a FastAPI Websocket is still open +# """ +# if ( +# ws.application_state == WebSocketState.CONNECTED and +# ws.client_state == WebSocketState.CONNECTED +# ): +# return True +# return False + + +class 
WebSocketChannelClosed(Exception): """ - Check if a FastAPI Websocket is still open + General WebSocket exception to signal closing the channel """ - if ( - ws.application_state == WebSocketState.CONNECTED and - ws.client_state == WebSocketState.CONNECTED + pass + + +async def channel_reader(channel: WebSocketChannel, rpc: RPC): + """ + Iterate over the messages from the channel and process the request + """ + try: + async for message in channel: + await _process_consumer_request(message, channel, rpc) + except ( + RuntimeError, + WebSocketDisconnect, + ConnectionClosed ): - return True - return False + raise WebSocketChannelClosed + except asyncio.CancelledError: + return + + +async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream): + """ + Iterate over messages in the message stream and send them + """ + try: + async for message in message_stream: + await channel.send(message) + except ( + RuntimeError, + WebSocketDisconnect, + ConnectionClosed + ): + raise WebSocketChannelClosed + except asyncio.CancelledError: + return async def _process_consumer_request( request: Dict[str, Any], channel: WebSocketChannel, - rpc: RPC, - channel_manager: ChannelManager + rpc: RPC ): """ Validate and handle a request from a websocket consumer @@ -75,7 +116,7 @@ async def _process_consumer_request( # Format response response = WSWhitelistMessage(data=whitelist) # Send it back - await channel_manager.send_direct(channel, response.dict(exclude_none=True)) + await channel.send(response.dict(exclude_none=True)) elif type == RPCRequestType.ANALYZED_DF: limit = None @@ -86,53 +127,76 @@ async def _process_consumer_request( # For every pair in the generator, send a separate message for message in rpc._ws_request_analyzed_df(limit): + # Format response response = WSAnalyzedDFMessage(data=message) - await channel_manager.send_direct(channel, response.dict(exclude_none=True)) + await channel.send(response.dict(exclude_none=True)) @router.websocket("/message/ws") async def message_endpoint( - ws: WebSocket, + websocket: WebSocket, + token: str = Depends(validate_ws_token), rpc: RPC = Depends(get_rpc), - channel_manager=Depends(get_channel_manager), - token: str = Depends(validate_ws_token) + message_stream: MessageStream = Depends(get_message_stream) ): - """ - Message WebSocket endpoint, facilitates sending RPC messages - """ - try: - channel = await channel_manager.on_connect(ws) - if await is_websocket_alive(ws): + async with WebSocketChannel(websocket).connect() as channel: + try: + logger.info(f"Channel connected - {channel}") - logger.info(f"Consumer connected - {channel}") + channel_tasks = asyncio.gather( + channel_reader(channel, rpc), + channel_broadcaster(channel, message_stream) + ) + await channel_tasks - # Keep connection open until explicitly closed, and process requests - try: - while not channel.is_closed(): - request = await channel.recv() + finally: + logger.info(f"Channel disconnected - {channel}") + channel_tasks.cancel() - # Process the request here - await _process_consumer_request(request, channel, rpc, channel_manager) - except (WebSocketDisconnect, WebSocketException): - # Handle client disconnects - logger.info(f"Consumer disconnected - {channel}") - except RuntimeError: - # Handle cases like - - # RuntimeError('Cannot call "send" once a closed message has been sent') - pass - except Exception as e: - logger.info(f"Consumer connection failed - {channel}: {e}") - logger.debug(e, exc_info=e) +# @router.websocket("/message/ws") +# async def message_endpoint( +# 
ws: WebSocket, +# rpc: RPC = Depends(get_rpc), +# channel_manager=Depends(get_channel_manager), +# token: str = Depends(validate_ws_token) +# ): +# """ +# Message WebSocket endpoint, facilitates sending RPC messages +# """ +# try: +# channel = await channel_manager.on_connect(ws) +# if await is_websocket_alive(ws): - except RuntimeError: - # WebSocket was closed - # Do nothing - pass - except Exception as e: - logger.error(f"Failed to serve - {ws.client}") - # Log tracebacks to keep track of what errors are happening - logger.exception(e) - finally: - if channel: - await channel_manager.on_disconnect(ws) +# logger.info(f"Consumer connected - {channel}") + +# # Keep connection open until explicitly closed, and process requests +# try: +# while not channel.is_closed(): +# request = await channel.recv() + +# # Process the request here +# await _process_consumer_request(request, channel, rpc, channel_manager) + +# except (WebSocketDisconnect, WebSocketException): +# # Handle client disconnects +# logger.info(f"Consumer disconnected - {channel}") +# except RuntimeError: +# # Handle cases like - +# # RuntimeError('Cannot call "send" once a closed message has been sent') +# pass +# except Exception as e: +# logger.info(f"Consumer connection failed - {channel}: {e}") +# logger.debug(e, exc_info=e) + +# except RuntimeError: +# # WebSocket was closed +# # Do nothing +# pass +# except Exception as e: +# logger.error(f"Failed to serve - {ws.client}") +# # Log tracebacks to keep track of what errors are happening +# logger.exception(e) +# finally: +# if channel: +# await channel_manager.on_disconnect(ws) diff --git a/freqtrade/rpc/api_server/deps.py b/freqtrade/rpc/api_server/deps.py index abd3db036..aed97367b 100644 --- a/freqtrade/rpc/api_server/deps.py +++ b/freqtrade/rpc/api_server/deps.py @@ -41,8 +41,8 @@ def get_exchange(config=Depends(get_config)): return ApiServer._exchange -def get_channel_manager(): - return ApiServer._ws_channel_manager +def get_message_stream(): + return ApiServer._message_stream def is_webserver_mode(config=Depends(get_config)): diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index e9a12e4df..7e2c3f39f 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -1,7 +1,6 @@ import asyncio import logging from ipaddress import IPv4Address -from threading import Thread from typing import Any, Dict import orjson @@ -15,7 +14,7 @@ from starlette.responses import JSONResponse from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.rpc.api_server.uvicorn_threaded import UvicornServer -from freqtrade.rpc.api_server.ws import ChannelManager +from freqtrade.rpc.api_server.ws.message_stream import MessageStream from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType from freqtrade.rpc.rpc import RPC, RPCException, RPCHandler @@ -51,9 +50,10 @@ class ApiServer(RPCHandler): # Exchange - only available in webserver mode. 
_exchange = None # websocket message queue stuff - _ws_channel_manager = None - _ws_thread = None - _ws_loop = None + # _ws_channel_manager = None + # _ws_thread = None + # _ws_loop = None + _message_stream = None def __new__(cls, *args, **kwargs): """ @@ -71,14 +71,15 @@ class ApiServer(RPCHandler): return self._standalone: bool = standalone self._server = None + self._ws_queue = None - self._ws_background_task = None + self._ws_publisher_task = None ApiServer.__initialized = True api_config = self._config['api_server'] - ApiServer._ws_channel_manager = ChannelManager() + # ApiServer._ws_channel_manager = ChannelManager() self.app = FastAPI(title="Freqtrade API", docs_url='/docs' if api_config.get('enable_openapi', False) else None, @@ -107,18 +108,18 @@ class ApiServer(RPCHandler): logger.info("Stopping API Server") self._server.cleanup() - if self._ws_thread and self._ws_loop: - logger.info("Stopping API Server background tasks") + # if self._ws_thread and self._ws_loop: + # logger.info("Stopping API Server background tasks") - if self._ws_background_task: - # Cancel the queue task - self._ws_background_task.cancel() + # if self._ws_background_task: + # # Cancel the queue task + # self._ws_background_task.cancel() - self._ws_thread.join() + # self._ws_thread.join() - self._ws_thread = None - self._ws_loop = None - self._ws_background_task = None + # self._ws_thread = None + # self._ws_loop = None + # self._ws_background_task = None @classmethod def shutdown(cls): @@ -170,51 +171,102 @@ class ApiServer(RPCHandler): ) app.add_exception_handler(RPCException, self.handle_rpc_exception) + app.add_event_handler( + event_type="startup", + func=self._api_startup_event + ) + app.add_event_handler( + event_type="shutdown", + func=self._api_shutdown_event + ) - def start_message_queue(self): - if self._ws_thread: - return + async def _api_startup_event(self): + if not ApiServer._message_stream: + ApiServer._message_stream = MessageStream() - # Create a new loop, as it'll be just for the background thread - self._ws_loop = asyncio.new_event_loop() + if not self._ws_queue: + self._ws_queue = ThreadedQueue() - # Start the thread - self._ws_thread = Thread(target=self._ws_loop.run_forever) - self._ws_thread.start() + if not self._ws_publisher_task: + self._ws_publisher_task = asyncio.create_task( + self._publish_messages() + ) - # Finally, submit the coro to the thread - self._ws_background_task = asyncio.run_coroutine_threadsafe( - self._broadcast_queue_data(), loop=self._ws_loop) + async def _api_shutdown_event(self): + if ApiServer._message_stream: + ApiServer._message_stream = None - async def _broadcast_queue_data(self): - # Instantiate the queue in this coroutine so it's attached to our loop - self._ws_queue = ThreadedQueue() - async_queue = self._ws_queue.async_q - - try: - while True: - logger.debug("Getting queue messages...") - # Get data from queue - message: WSMessageSchemaType = await async_queue.get() - logger.debug(f"Found message of type: {message.get('type')}") - async_queue.task_done() - # Broadcast it - await self._ws_channel_manager.broadcast(message) - except asyncio.CancelledError: - pass - - # For testing, shouldn't happen when stable - except Exception as e: - logger.exception(f"Exception happened in background task: {e}") - - finally: - # Disconnect channels and stop the loop on cancel - await self._ws_channel_manager.disconnect_all() - self._ws_loop.stop() - # Avoid adding more items to the queue if they aren't - # going to get broadcasted. 
+ if self._ws_queue: self._ws_queue = None + if self._ws_publisher_task: + self._ws_publisher_task.cancel() + + async def _publish_messages(self): + """ + Background task that reads messages from the queue and adds them + to the message stream + """ + try: + async_queue = self._ws_queue.async_q + message_stream = ApiServer._message_stream + + while message_stream: + message: WSMessageSchemaType = await async_queue.get() + message_stream.publish(message) + + # Make sure to throttle how fast we + # publish messages as some clients will be + # slower than others + await asyncio.sleep(0.01) + async_queue.task_done() + finally: + self._ws_queue = None + + # def start_message_queue(self): + # if self._ws_thread: + # return + + # # Create a new loop, as it'll be just for the background thread + # self._ws_loop = asyncio.new_event_loop() + + # # Start the thread + # self._ws_thread = Thread(target=self._ws_loop.run_forever) + # self._ws_thread.start() + + # # Finally, submit the coro to the thread + # self._ws_background_task = asyncio.run_coroutine_threadsafe( + # self._broadcast_queue_data(), loop=self._ws_loop) + + # async def _broadcast_queue_data(self): + # # Instantiate the queue in this coroutine so it's attached to our loop + # self._ws_queue = ThreadedQueue() + # async_queue = self._ws_queue.async_q + + # try: + # while True: + # logger.debug("Getting queue messages...") + # # Get data from queue + # message: WSMessageSchemaType = await async_queue.get() + # logger.debug(f"Found message of type: {message.get('type')}") + # async_queue.task_done() + # # Broadcast it + # await self._ws_channel_manager.broadcast(message) + # except asyncio.CancelledError: + # pass + + # # For testing, shouldn't happen when stable + # except Exception as e: + # logger.exception(f"Exception happened in background task: {e}") + + # finally: + # # Disconnect channels and stop the loop on cancel + # await self._ws_channel_manager.disconnect_all() + # self._ws_loop.stop() + # # Avoid adding more items to the queue if they aren't + # # going to get broadcasted. + # self._ws_queue = None + def start_api(self): """ Start API ... should be run in thread. 
@@ -253,7 +305,7 @@ class ApiServer(RPCHandler): if self._standalone: self._server.run() else: - self.start_message_queue() + # self.start_message_queue() self._server.run_in_thread() except Exception: logger.exception("Api server failed to start.") diff --git a/freqtrade/rpc/api_server/ws/__init__.py b/freqtrade/rpc/api_server/ws/__init__.py index 055b20a9d..0b94d3fee 100644 --- a/freqtrade/rpc/api_server/ws/__init__.py +++ b/freqtrade/rpc/api_server/ws/__init__.py @@ -3,4 +3,5 @@ from freqtrade.rpc.api_server.ws.types import WebSocketType from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy from freqtrade.rpc.api_server.ws.serializer import HybridJSONWebSocketSerializer -from freqtrade.rpc.api_server.ws.channel import ChannelManager, WebSocketChannel +from freqtrade.rpc.api_server.ws.channel import WebSocketChannel +from freqtrade.rpc.api_server.ws.message_stream import MessageStream diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 88b4db9ba..b98bd13c9 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -1,12 +1,9 @@ import asyncio import logging -import time -from threading import RLock +from contextlib import asynccontextmanager from typing import Any, Dict, List, Optional, Type, Union from uuid import uuid4 -from fastapi import WebSocket as FastAPIWebSocket - from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy from freqtrade.rpc.api_server.ws.serializer import (HybridJSONWebSocketSerializer, WebSocketSerializer) @@ -21,32 +18,21 @@ class WebSocketChannel: """ Object to help facilitate managing a websocket connection """ - def __init__( self, websocket: WebSocketType, channel_id: Optional[str] = None, - drain_timeout: int = 3, - throttle: float = 0.01, serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer ): - self.channel_id = channel_id if channel_id else uuid4().hex[:8] - - # The WebSocket object self._websocket = WebSocketProxy(websocket) - self.drain_timeout = drain_timeout - self.throttle = throttle - - self._subscriptions: List[str] = [] - # 32 is the size of the receiving queue in websockets package - self.queue: asyncio.Queue[Dict[str, Any]] = asyncio.Queue(maxsize=32) - self._relay_task = asyncio.create_task(self.relay()) - # Internal event to signify a closed websocket self._closed = asyncio.Event() + # Throttle how fast we send messages + self._throttle = 0.01 + # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) @@ -61,40 +47,16 @@ class WebSocketChannel: def remote_addr(self): return self._websocket.remote_addr - async def _send(self, data): + async def send(self, message: Union[WSMessageSchemaType, Dict[str, Any]]): """ - Send data on the wrapped websocket + Send a message on the wrapped websocket """ - await self._wrapped_ws.send(data) - - async def send(self, data) -> bool: - """ - Add the data to the queue to be sent. 
- :returns: True if data added to queue, False otherwise - """ - - # This block only runs if the queue is full, it will wait - # until self.drain_timeout for the relay to drain the outgoing queue - # We can't use asyncio.wait_for here because the queue may have been created with a - # different eventloop - start = time.time() - while self.queue.full(): - await asyncio.sleep(1) - if (time.time() - start) > self.drain_timeout: - return False - - # If for some reason the queue is still full, just return False - try: - self.queue.put_nowait(data) - except asyncio.QueueFull: - return False - - # If we got here everything is ok - return True + await asyncio.sleep(self._throttle) + await self._wrapped_ws.send(message) async def recv(self): """ - Receive data on the wrapped websocket + Receive a message on the wrapped websocket """ return await self._wrapped_ws.recv() @@ -104,18 +66,23 @@ class WebSocketChannel: """ return await self._websocket.ping() + async def accept(self): + """ + Accept the underlying websocket connection + """ + return await self._websocket.accept() + async def close(self): """ Close the WebSocketChannel """ try: - await self.raw_websocket.close() + await self._websocket.close() except Exception: pass self._closed.set() - self._relay_task.cancel() def is_closed(self) -> bool: """ @@ -139,99 +106,243 @@ class WebSocketChannel: """ return message_type in self._subscriptions - async def relay(self): + async def __aiter__(self): """ - Relay messages from the channel's queue and send them out. This is started - as a task. + Generator for received messages """ - while not self._closed.is_set(): - message = await self.queue.get() + while True: try: - await self._send(message) - self.queue.task_done() + yield await self.recv() + except Exception: + break - # Limit messages per sec. - # Could cause problems with queue size if too low, and - # problems with network traffik if too high. 
- # 0.01 = 100/s - await asyncio.sleep(self.throttle) - except RuntimeError: - # The connection was closed, just exit the task - return - - -class ChannelManager: - def __init__(self): - self.channels = dict() - self._lock = RLock() # Re-entrant Lock - - async def on_connect(self, websocket: WebSocketType): + @asynccontextmanager + async def connect(self): """ - Wrap websocket connection into Channel and add to list - - :param websocket: The WebSocket object to attach to the Channel + Context manager for safely opening and closing the websocket connection """ - if isinstance(websocket, FastAPIWebSocket): - try: - await websocket.accept() - except RuntimeError: - # The connection was closed before we could accept it - return + try: + await self.accept() + yield self + finally: + await self.close() - ws_channel = WebSocketChannel(websocket) - with self._lock: - self.channels[websocket] = ws_channel +# class WebSocketChannel: +# """ +# Object to help facilitate managing a websocket connection +# """ - return ws_channel +# def __init__( +# self, +# websocket: WebSocketType, +# channel_id: Optional[str] = None, +# drain_timeout: int = 3, +# throttle: float = 0.01, +# serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer +# ): - async def on_disconnect(self, websocket: WebSocketType): - """ - Call close on the channel if it's not, and remove from channel list +# self.channel_id = channel_id if channel_id else uuid4().hex[:8] - :param websocket: The WebSocket objet attached to the Channel - """ - with self._lock: - channel = self.channels.get(websocket) - if channel: - logger.info(f"Disconnecting channel {channel}") - if not channel.is_closed(): - await channel.close() +# # The WebSocket object +# self._websocket = WebSocketProxy(websocket) - del self.channels[websocket] +# self.drain_timeout = drain_timeout +# self.throttle = throttle - async def disconnect_all(self): - """ - Disconnect all Channels - """ - with self._lock: - for websocket in self.channels.copy().keys(): - await self.on_disconnect(websocket) +# self._subscriptions: List[str] = [] +# # 32 is the size of the receiving queue in websockets package +# self.queue: asyncio.Queue[Dict[str, Any]] = asyncio.Queue(maxsize=32) +# self._relay_task = asyncio.create_task(self.relay()) - async def broadcast(self, message: WSMessageSchemaType): - """ - Broadcast a message on all Channels +# # Internal event to signify a closed websocket +# self._closed = asyncio.Event() - :param message: The message to send - """ - with self._lock: - for channel in self.channels.copy().values(): - if channel.subscribed_to(message.get('type')): - await self.send_direct(channel, message) +# # Wrap the WebSocket in the Serializing class +# self._wrapped_ws = serializer_cls(self._websocket) - async def send_direct( - self, channel: WebSocketChannel, message: Union[WSMessageSchemaType, Dict[str, Any]]): - """ - Send a message directly through direct_channel only +# def __repr__(self): +# return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" - :param direct_channel: The WebSocketChannel object to send the message through - :param message: The message to send - """ - if not await channel.send(message): - await self.on_disconnect(channel.raw_websocket) +# @property +# def raw_websocket(self): +# return self._websocket.raw_websocket - def has_channels(self): - """ - Flag for more than 0 channels - """ - return len(self.channels) > 0 +# @property +# def remote_addr(self): +# return self._websocket.remote_addr + +# async def _send(self, 
data): +# """ +# Send data on the wrapped websocket +# """ +# await self._wrapped_ws.send(data) + +# async def send(self, data) -> bool: +# """ +# Add the data to the queue to be sent. +# :returns: True if data added to queue, False otherwise +# """ + +# # This block only runs if the queue is full, it will wait +# # until self.drain_timeout for the relay to drain the outgoing queue +# # We can't use asyncio.wait_for here because the queue may have been created with a +# # different eventloop +# start = time.time() +# while self.queue.full(): +# await asyncio.sleep(1) +# if (time.time() - start) > self.drain_timeout: +# return False + +# # If for some reason the queue is still full, just return False +# try: +# self.queue.put_nowait(data) +# except asyncio.QueueFull: +# return False + +# # If we got here everything is ok +# return True + +# async def recv(self): +# """ +# Receive data on the wrapped websocket +# """ +# return await self._wrapped_ws.recv() + +# async def ping(self): +# """ +# Ping the websocket +# """ +# return await self._websocket.ping() + +# async def close(self): +# """ +# Close the WebSocketChannel +# """ + +# try: +# await self.raw_websocket.close() +# except Exception: +# pass + +# self._closed.set() +# self._relay_task.cancel() + +# def is_closed(self) -> bool: +# """ +# Closed flag +# """ +# return self._closed.is_set() + +# def set_subscriptions(self, subscriptions: List[str] = []) -> None: +# """ +# Set which subscriptions this channel is subscribed to + +# :param subscriptions: List of subscriptions, List[str] +# """ +# self._subscriptions = subscriptions + +# def subscribed_to(self, message_type: str) -> bool: +# """ +# Check if this channel is subscribed to the message_type + +# :param message_type: The message type to check +# """ +# return message_type in self._subscriptions + +# async def relay(self): +# """ +# Relay messages from the channel's queue and send them out. This is started +# as a task. +# """ +# while not self._closed.is_set(): +# message = await self.queue.get() +# try: +# await self._send(message) +# self.queue.task_done() + +# # Limit messages per sec. +# # Could cause problems with queue size if too low, and +# # problems with network traffik if too high. 
+# # 0.01 = 100/s +# await asyncio.sleep(self.throttle) +# except RuntimeError: +# # The connection was closed, just exit the task +# return + + +# class ChannelManager: +# def __init__(self): +# self.channels = dict() +# self._lock = RLock() # Re-entrant Lock + +# async def on_connect(self, websocket: WebSocketType): +# """ +# Wrap websocket connection into Channel and add to list + +# :param websocket: The WebSocket object to attach to the Channel +# """ +# if isinstance(websocket, FastAPIWebSocket): +# try: +# await websocket.accept() +# except RuntimeError: +# # The connection was closed before we could accept it +# return + +# ws_channel = WebSocketChannel(websocket) + +# with self._lock: +# self.channels[websocket] = ws_channel + +# return ws_channel + +# async def on_disconnect(self, websocket: WebSocketType): +# """ +# Call close on the channel if it's not, and remove from channel list + +# :param websocket: The WebSocket objet attached to the Channel +# """ +# with self._lock: +# channel = self.channels.get(websocket) +# if channel: +# logger.info(f"Disconnecting channel {channel}") +# if not channel.is_closed(): +# await channel.close() + +# del self.channels[websocket] + +# async def disconnect_all(self): +# """ +# Disconnect all Channels +# """ +# with self._lock: +# for websocket in self.channels.copy().keys(): +# await self.on_disconnect(websocket) + +# async def broadcast(self, message: WSMessageSchemaType): +# """ +# Broadcast a message on all Channels + +# :param message: The message to send +# """ +# with self._lock: +# for channel in self.channels.copy().values(): +# if channel.subscribed_to(message.get('type')): +# await self.send_direct(channel, message) + +# async def send_direct( +# self, channel: WebSocketChannel, message: Union[WSMessageSchemaType, Dict[str, Any]]): +# """ +# Send a message directly through direct_channel only + +# :param direct_channel: The WebSocketChannel object to send the message through +# :param message: The message to send +# """ +# if not await channel.send(message): +# await self.on_disconnect(channel.raw_websocket) + +# def has_channels(self): +# """ +# Flag for more than 0 channels +# """ +# return len(self.channels) > 0 diff --git a/freqtrade/rpc/api_server/ws/message_stream.py b/freqtrade/rpc/api_server/ws/message_stream.py new file mode 100644 index 000000000..f77242719 --- /dev/null +++ b/freqtrade/rpc/api_server/ws/message_stream.py @@ -0,0 +1,23 @@ +import asyncio + + +class MessageStream: + """ + A message stream for consumers to subscribe to, + and for producers to publish to. 
+ """ + def __init__(self): + self._loop = asyncio.get_running_loop() + self._waiter = self._loop.create_future() + + def publish(self, message): + waiter, self._waiter = self._waiter, self._loop.create_future() + waiter.set_result((message, self._waiter)) + + async def subscribe(self): + waiter = self._waiter + while True: + message, waiter = await waiter + yield message + + __aiter__ = subscribe diff --git a/freqtrade/rpc/api_server/ws/serializer.py b/freqtrade/rpc/api_server/ws/serializer.py index 6c402a100..85703136b 100644 --- a/freqtrade/rpc/api_server/ws/serializer.py +++ b/freqtrade/rpc/api_server/ws/serializer.py @@ -1,5 +1,6 @@ import logging from abc import ABC, abstractmethod +from typing import Any, Dict, Union import orjson import rapidjson @@ -7,6 +8,7 @@ from pandas import DataFrame from freqtrade.misc import dataframe_to_json, json_to_dataframe from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy +from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType logger = logging.getLogger(__name__) @@ -24,7 +26,7 @@ class WebSocketSerializer(ABC): def _deserialize(self, data): raise NotImplementedError() - async def send(self, data: bytes): + async def send(self, data: Union[WSMessageSchemaType, Dict[str, Any]]): await self._websocket.send(self._serialize(data)) async def recv(self) -> bytes: @@ -32,8 +34,8 @@ class WebSocketSerializer(ABC): return self._deserialize(data) - async def close(self, code: int = 1000): - await self._websocket.close(code) + # async def close(self, code: int = 1000): + # await self._websocket.close(code) class HybridJSONWebSocketSerializer(WebSocketSerializer): From d713af045fbd51df67825836d9fe3a17f1424622 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 14 Nov 2022 22:21:40 -0700 Subject: [PATCH 152/421] remove main queue completely --- freqtrade/rpc/api_server/api_ws.py | 3 +- freqtrade/rpc/api_server/webserver.py | 47 ++------------------------ freqtrade/rpc/api_server/ws/channel.py | 5 ++- 3 files changed, 6 insertions(+), 49 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index a9b88aadb..3f207eac3 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -148,7 +148,8 @@ async def message_endpoint( channel_broadcaster(channel, message_stream) ) await channel_tasks - + except WebSocketChannelClosed: + pass finally: logger.info(f"Channel disconnected - {channel}") channel_tasks.cancel() diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index 7e2c3f39f..d0695e06d 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -1,4 +1,3 @@ -import asyncio import logging from ipaddress import IPv4Address from typing import Any, Dict @@ -7,15 +6,12 @@ import orjson import uvicorn from fastapi import Depends, FastAPI from fastapi.middleware.cors import CORSMiddleware -# Look into alternatives -from janus import Queue as ThreadedQueue from starlette.responses import JSONResponse from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.rpc.api_server.uvicorn_threaded import UvicornServer from freqtrade.rpc.api_server.ws.message_stream import MessageStream -from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType from freqtrade.rpc.rpc import RPC, RPCException, RPCHandler @@ -72,9 +68,6 @@ class ApiServer(RPCHandler): self._standalone: bool = standalone self._server = None - self._ws_queue = None - 
self._ws_publisher_task = None - ApiServer.__initialized = True api_config = self._config['api_server'] @@ -130,9 +123,8 @@ class ApiServer(RPCHandler): cls._rpc = None def send_msg(self, msg: Dict[str, Any]) -> None: - if self._ws_queue: - sync_q = self._ws_queue.sync_q - sync_q.put(msg) + if ApiServer._message_stream: + ApiServer._message_stream.publish(msg) def handle_rpc_exception(self, request, exc): logger.exception(f"API Error calling: {exc}") @@ -184,45 +176,10 @@ class ApiServer(RPCHandler): if not ApiServer._message_stream: ApiServer._message_stream = MessageStream() - if not self._ws_queue: - self._ws_queue = ThreadedQueue() - - if not self._ws_publisher_task: - self._ws_publisher_task = asyncio.create_task( - self._publish_messages() - ) - async def _api_shutdown_event(self): if ApiServer._message_stream: ApiServer._message_stream = None - if self._ws_queue: - self._ws_queue = None - - if self._ws_publisher_task: - self._ws_publisher_task.cancel() - - async def _publish_messages(self): - """ - Background task that reads messages from the queue and adds them - to the message stream - """ - try: - async_queue = self._ws_queue.async_q - message_stream = ApiServer._message_stream - - while message_stream: - message: WSMessageSchemaType = await async_queue.get() - message_stream.publish(message) - - # Make sure to throttle how fast we - # publish messages as some clients will be - # slower than others - await asyncio.sleep(0.01) - async_queue.task_done() - finally: - self._ws_queue = None - # def start_message_queue(self): # if self._ws_thread: # return diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index b98bd13c9..39c8db516 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -30,8 +30,8 @@ class WebSocketChannel: # Internal event to signify a closed websocket self._closed = asyncio.Event() - # Throttle how fast we send messages - self._throttle = 0.01 + # The subscribed message types + self._subscriptions: List[str] = [] # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) @@ -51,7 +51,6 @@ class WebSocketChannel: """ Send a message on the wrapped websocket """ - await asyncio.sleep(self._throttle) await self._wrapped_ws.send(message) async def recv(self): From 442467e8aed2ff639bfba04e7a2f6e175f774af1 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 14 Nov 2022 22:26:34 -0700 Subject: [PATCH 153/421] remove old comments and code --- freqtrade/rpc/api_server/api_ws.py | 60 ------ freqtrade/rpc/api_server/webserver.py | 75 ++------ freqtrade/rpc/api_server/ws/channel.py | 220 ---------------------- freqtrade/rpc/api_server/ws/serializer.py | 3 - 4 files changed, 12 insertions(+), 346 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 3f207eac3..01243b0cc 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -23,18 +23,6 @@ logger = logging.getLogger(__name__) router = APIRouter() -# async def is_websocket_alive(ws: WebSocket) -> bool: -# """ -# Check if a FastAPI Websocket is still open -# """ -# if ( -# ws.application_state == WebSocketState.CONNECTED and -# ws.client_state == WebSocketState.CONNECTED -# ): -# return True -# return False - - class WebSocketChannelClosed(Exception): """ General WebSocket exception to signal closing the channel @@ -153,51 +141,3 @@ async def message_endpoint( finally: logger.info(f"Channel disconnected - {channel}") 
channel_tasks.cancel() - - -# @router.websocket("/message/ws") -# async def message_endpoint( -# ws: WebSocket, -# rpc: RPC = Depends(get_rpc), -# channel_manager=Depends(get_channel_manager), -# token: str = Depends(validate_ws_token) -# ): -# """ -# Message WebSocket endpoint, facilitates sending RPC messages -# """ -# try: -# channel = await channel_manager.on_connect(ws) -# if await is_websocket_alive(ws): - -# logger.info(f"Consumer connected - {channel}") - -# # Keep connection open until explicitly closed, and process requests -# try: -# while not channel.is_closed(): -# request = await channel.recv() - -# # Process the request here -# await _process_consumer_request(request, channel, rpc, channel_manager) - -# except (WebSocketDisconnect, WebSocketException): -# # Handle client disconnects -# logger.info(f"Consumer disconnected - {channel}") -# except RuntimeError: -# # Handle cases like - -# # RuntimeError('Cannot call "send" once a closed message has been sent') -# pass -# except Exception as e: -# logger.info(f"Consumer connection failed - {channel}: {e}") -# logger.debug(e, exc_info=e) - -# except RuntimeError: -# # WebSocket was closed -# # Do nothing -# pass -# except Exception as e: -# logger.error(f"Failed to serve - {ws.client}") -# # Log tracebacks to keep track of what errors are happening -# logger.exception(e) -# finally: -# if channel: -# await channel_manager.on_disconnect(ws) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index d0695e06d..f100a46ef 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -45,10 +45,7 @@ class ApiServer(RPCHandler): _config: Config = {} # Exchange - only available in webserver mode. _exchange = None - # websocket message queue stuff - # _ws_channel_manager = None - # _ws_thread = None - # _ws_loop = None + # websocket message stuff _message_stream = None def __new__(cls, *args, **kwargs): @@ -72,8 +69,6 @@ class ApiServer(RPCHandler): api_config = self._config['api_server'] - # ApiServer._ws_channel_manager = ChannelManager() - self.app = FastAPI(title="Freqtrade API", docs_url='/docs' if api_config.get('enable_openapi', False) else None, redoc_url=None, @@ -101,19 +96,6 @@ class ApiServer(RPCHandler): logger.info("Stopping API Server") self._server.cleanup() - # if self._ws_thread and self._ws_loop: - # logger.info("Stopping API Server background tasks") - - # if self._ws_background_task: - # # Cancel the queue task - # self._ws_background_task.cancel() - - # self._ws_thread.join() - - # self._ws_thread = None - # self._ws_loop = None - # self._ws_background_task = None - @classmethod def shutdown(cls): cls.__initialized = False @@ -123,6 +105,9 @@ class ApiServer(RPCHandler): cls._rpc = None def send_msg(self, msg: Dict[str, Any]) -> None: + """ + Publish the message to the message stream + """ if ApiServer._message_stream: ApiServer._message_stream.publish(msg) @@ -173,57 +158,21 @@ class ApiServer(RPCHandler): ) async def _api_startup_event(self): + """ + Creates the MessageStream class on startup + so it has access to the same event loop + as uvicorn + """ if not ApiServer._message_stream: ApiServer._message_stream = MessageStream() async def _api_shutdown_event(self): + """ + Removes the MessageStream class on shutdown + """ if ApiServer._message_stream: ApiServer._message_stream = None - # def start_message_queue(self): - # if self._ws_thread: - # return - - # # Create a new loop, as it'll be just for the background thread - # self._ws_loop = 
asyncio.new_event_loop() - - # # Start the thread - # self._ws_thread = Thread(target=self._ws_loop.run_forever) - # self._ws_thread.start() - - # # Finally, submit the coro to the thread - # self._ws_background_task = asyncio.run_coroutine_threadsafe( - # self._broadcast_queue_data(), loop=self._ws_loop) - - # async def _broadcast_queue_data(self): - # # Instantiate the queue in this coroutine so it's attached to our loop - # self._ws_queue = ThreadedQueue() - # async_queue = self._ws_queue.async_q - - # try: - # while True: - # logger.debug("Getting queue messages...") - # # Get data from queue - # message: WSMessageSchemaType = await async_queue.get() - # logger.debug(f"Found message of type: {message.get('type')}") - # async_queue.task_done() - # # Broadcast it - # await self._ws_channel_manager.broadcast(message) - # except asyncio.CancelledError: - # pass - - # # For testing, shouldn't happen when stable - # except Exception as e: - # logger.exception(f"Exception happened in background task: {e}") - - # finally: - # # Disconnect channels and stop the loop on cancel - # await self._ws_channel_manager.disconnect_all() - # self._ws_loop.stop() - # # Avoid adding more items to the queue if they aren't - # # going to get broadcasted. - # self._ws_queue = None - def start_api(self): """ Start API ... should be run in thread. diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 39c8db516..ee16a95c6 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -125,223 +125,3 @@ class WebSocketChannel: yield self finally: await self.close() - - -# class WebSocketChannel: -# """ -# Object to help facilitate managing a websocket connection -# """ - -# def __init__( -# self, -# websocket: WebSocketType, -# channel_id: Optional[str] = None, -# drain_timeout: int = 3, -# throttle: float = 0.01, -# serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer -# ): - -# self.channel_id = channel_id if channel_id else uuid4().hex[:8] - -# # The WebSocket object -# self._websocket = WebSocketProxy(websocket) - -# self.drain_timeout = drain_timeout -# self.throttle = throttle - -# self._subscriptions: List[str] = [] -# # 32 is the size of the receiving queue in websockets package -# self.queue: asyncio.Queue[Dict[str, Any]] = asyncio.Queue(maxsize=32) -# self._relay_task = asyncio.create_task(self.relay()) - -# # Internal event to signify a closed websocket -# self._closed = asyncio.Event() - -# # Wrap the WebSocket in the Serializing class -# self._wrapped_ws = serializer_cls(self._websocket) - -# def __repr__(self): -# return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" - -# @property -# def raw_websocket(self): -# return self._websocket.raw_websocket - -# @property -# def remote_addr(self): -# return self._websocket.remote_addr - -# async def _send(self, data): -# """ -# Send data on the wrapped websocket -# """ -# await self._wrapped_ws.send(data) - -# async def send(self, data) -> bool: -# """ -# Add the data to the queue to be sent. 
-# :returns: True if data added to queue, False otherwise -# """ - -# # This block only runs if the queue is full, it will wait -# # until self.drain_timeout for the relay to drain the outgoing queue -# # We can't use asyncio.wait_for here because the queue may have been created with a -# # different eventloop -# start = time.time() -# while self.queue.full(): -# await asyncio.sleep(1) -# if (time.time() - start) > self.drain_timeout: -# return False - -# # If for some reason the queue is still full, just return False -# try: -# self.queue.put_nowait(data) -# except asyncio.QueueFull: -# return False - -# # If we got here everything is ok -# return True - -# async def recv(self): -# """ -# Receive data on the wrapped websocket -# """ -# return await self._wrapped_ws.recv() - -# async def ping(self): -# """ -# Ping the websocket -# """ -# return await self._websocket.ping() - -# async def close(self): -# """ -# Close the WebSocketChannel -# """ - -# try: -# await self.raw_websocket.close() -# except Exception: -# pass - -# self._closed.set() -# self._relay_task.cancel() - -# def is_closed(self) -> bool: -# """ -# Closed flag -# """ -# return self._closed.is_set() - -# def set_subscriptions(self, subscriptions: List[str] = []) -> None: -# """ -# Set which subscriptions this channel is subscribed to - -# :param subscriptions: List of subscriptions, List[str] -# """ -# self._subscriptions = subscriptions - -# def subscribed_to(self, message_type: str) -> bool: -# """ -# Check if this channel is subscribed to the message_type - -# :param message_type: The message type to check -# """ -# return message_type in self._subscriptions - -# async def relay(self): -# """ -# Relay messages from the channel's queue and send them out. This is started -# as a task. -# """ -# while not self._closed.is_set(): -# message = await self.queue.get() -# try: -# await self._send(message) -# self.queue.task_done() - -# # Limit messages per sec. -# # Could cause problems with queue size if too low, and -# # problems with network traffik if too high. 
-# # 0.01 = 100/s -# await asyncio.sleep(self.throttle) -# except RuntimeError: -# # The connection was closed, just exit the task -# return - - -# class ChannelManager: -# def __init__(self): -# self.channels = dict() -# self._lock = RLock() # Re-entrant Lock - -# async def on_connect(self, websocket: WebSocketType): -# """ -# Wrap websocket connection into Channel and add to list - -# :param websocket: The WebSocket object to attach to the Channel -# """ -# if isinstance(websocket, FastAPIWebSocket): -# try: -# await websocket.accept() -# except RuntimeError: -# # The connection was closed before we could accept it -# return - -# ws_channel = WebSocketChannel(websocket) - -# with self._lock: -# self.channels[websocket] = ws_channel - -# return ws_channel - -# async def on_disconnect(self, websocket: WebSocketType): -# """ -# Call close on the channel if it's not, and remove from channel list - -# :param websocket: The WebSocket objet attached to the Channel -# """ -# with self._lock: -# channel = self.channels.get(websocket) -# if channel: -# logger.info(f"Disconnecting channel {channel}") -# if not channel.is_closed(): -# await channel.close() - -# del self.channels[websocket] - -# async def disconnect_all(self): -# """ -# Disconnect all Channels -# """ -# with self._lock: -# for websocket in self.channels.copy().keys(): -# await self.on_disconnect(websocket) - -# async def broadcast(self, message: WSMessageSchemaType): -# """ -# Broadcast a message on all Channels - -# :param message: The message to send -# """ -# with self._lock: -# for channel in self.channels.copy().values(): -# if channel.subscribed_to(message.get('type')): -# await self.send_direct(channel, message) - -# async def send_direct( -# self, channel: WebSocketChannel, message: Union[WSMessageSchemaType, Dict[str, Any]]): -# """ -# Send a message directly through direct_channel only - -# :param direct_channel: The WebSocketChannel object to send the message through -# :param message: The message to send -# """ -# if not await channel.send(message): -# await self.on_disconnect(channel.raw_websocket) - -# def has_channels(self): -# """ -# Flag for more than 0 channels -# """ -# return len(self.channels) > 0 diff --git a/freqtrade/rpc/api_server/ws/serializer.py b/freqtrade/rpc/api_server/ws/serializer.py index 85703136b..625a0990c 100644 --- a/freqtrade/rpc/api_server/ws/serializer.py +++ b/freqtrade/rpc/api_server/ws/serializer.py @@ -34,9 +34,6 @@ class WebSocketSerializer(ABC): return self._deserialize(data) - # async def close(self, code: int = 1000): - # await self._websocket.close(code) - class HybridJSONWebSocketSerializer(WebSocketSerializer): def _serialize(self, data) -> str: From b01e4e3dbfcfebc72990e03399a7bcb93f231d5f Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 10:14:30 -0300 Subject: [PATCH 154/421] change default value - save_live_data_backtest as false --- freqtrade/freqai/freqai_interface.py | 2 +- freqtrade/freqai/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 47d75dfaa..cc6cd3c9b 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -68,7 +68,7 @@ class IFreqaiModel(ABC): if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') self.save_live_data_backtest: bool = self.freqai_info.get( - "save_live_data_backtest", True) + "save_live_data_backtest", False) if 
self.save_live_data_backtest: logger.info('Live configured to save data for backtest.') diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index ad38a339b..a4e7a9a55 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -230,7 +230,7 @@ def get_timerange_backtest_live_models(config: Config) -> str: dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) timerange: TimeRange = TimeRange() - if not config.get("save_live_data_backtest", True): + if not config.get("save_live_data_backtest", False): timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) else: timerange = dk.get_timerange_from_backtesting_live_dataframe() From 913749c81bad3c85c882391bf0b6341967b0e89a Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 10:30:16 -0300 Subject: [PATCH 155/421] backtesting_from_live_saved_files - code refactoring --- docs/freqai-running.md | 2 +- freqtrade/freqai/data_kitchen.py | 27 +++++++++++---------------- freqtrade/freqai/freqai_interface.py | 2 -- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/docs/freqai-running.md b/docs/freqai-running.md index f97ed0ab4..d2f9595be 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -81,7 +81,7 @@ To save the models generated during a particular backtest so that you can start ### Backtest live models -FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry/run for comparison or other study. For that, you must set `"purge_old_models"` to `True` in the config. +FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry/run for comparison or other study. For that, you must set `"purge_old_models"` to `False` in the config. The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models. diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index c7fae7770..d5427c4a5 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1519,15 +1519,13 @@ class FreqaiDataKitchen: pair_path = pair.split(":")[0].replace("/", "_").lower() file_name = f"live_backtesting_{pair_path}.feather" - path_to_live_backtesting_file = Path(self.full_path / - self.backtesting_live_model_folder_path / - file_name) - path_to_live_backtesting_bkp_file = Path(self.full_path / - self.backtesting_live_model_folder_path / - file_name.replace(".feather", ".backup.feather")) - - self.backtesting_live_model_path = path_to_live_backtesting_file - self.backtesting_live_model_bkp_path = path_to_live_backtesting_bkp_file + self.backtesting_live_model_path = Path(self.full_path / + self.backtesting_live_model_folder_path / + file_name) + self.backtesting_live_model_bkp_path = Path( + self.full_path / + self.backtesting_live_model_folder_path / + file_name.replace(".feather", ".backup.feather")) def save_backtesting_live_dataframe( self, dataframe: DataFrame, pair: str @@ -1566,15 +1564,12 @@ class FreqaiDataKitchen: return saved_dataframe else: raise OperationalException( - "Saved pair file not found" + "Saved live backtesting dataframe file not found." 
) - def get_timerange_from_backtesting_live_dataframe( - self) -> TimeRange: + def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: """ - Returns timerange information based on a FreqAI model directory - :param models_path: FreqAI model path - + Returns timerange information based on live backtesting dataframe file :return: timerange calculated from saved live data """ all_assets_start_dates = [] @@ -1592,7 +1587,7 @@ class FreqaiDataKitchen: all_assets_start_dates.append(saved_dataframe.date.min()) all_assets_end_dates.append(saved_dataframe.date.max()) start_date = min(all_assets_start_dates) - end_date = min(all_assets_end_dates) + end_date = max(all_assets_end_dates) # add 1 day to string timerange to ensure BT module will load all dataframe data end_date = end_date + timedelta(days=1) backtesting_timerange = TimeRange( diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index cc6cd3c9b..8d84d70c5 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -334,8 +334,6 @@ class IFreqaiModel(ABC): FreqaiDataKitchen = Data management/analysis tool associated to present pair only """ pair = metadata["pair"] - dk.return_dataframe = dataframe - dk.return_dataframe = dataframe self.dk.set_backtesting_live_dataframe_path(pair) saved_dataframe = self.dk.get_backtesting_live_dataframe() From 99bff9cbfa149b0c28b91c2736a472aad47c8633 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 10:30:51 -0300 Subject: [PATCH 156/421] backtesting_from_live_saved_files - code refactoring --- freqtrade/freqai/data_kitchen.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index d5427c4a5..ed78cfee5 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1519,9 +1519,10 @@ class FreqaiDataKitchen: pair_path = pair.split(":")[0].replace("/", "_").lower() file_name = f"live_backtesting_{pair_path}.feather" - self.backtesting_live_model_path = Path(self.full_path / - self.backtesting_live_model_folder_path / - file_name) + self.backtesting_live_model_path = Path( + self.full_path / + self.backtesting_live_model_folder_path / + file_name) self.backtesting_live_model_bkp_path = Path( self.full_path / self.backtesting_live_model_folder_path / From 3903b04d3f42f4465d85165913e265c75ffc9f76 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 15:20:07 -0300 Subject: [PATCH 157/421] save_live_data_backtest - added docs and tests --- docs/freqai-parameter-table.md | 1 + docs/freqai-running.md | 5 ++- freqtrade/freqai/data_kitchen.py | 10 +++-- freqtrade/freqai/freqai_interface.py | 10 ++--- tests/freqai/test_freqai_datakitchen.py | 44 ++++++++++++++++++++ tests/freqai/test_freqai_interface.py | 55 +++++++++++++++++++++++++ 6 files changed, 114 insertions(+), 11 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index c027a12b1..2961b1b8d 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -15,6 +15,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old.
**Datatype:** Positive integer.
Default: `0` (models never expire). | `purge_old_models` | Delete obsolete models.
**Datatype:** Boolean.
Default: `False` (all historic models remain on disk). | `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`.
**Datatype:** Boolean.
Default: `False` (no models are saved).
+| `save_live_data_backtest` | Save live dataframe during dry/live runs to reuse in backtesting with the [Backtest live models](freqai-running.md#backtest_live_models) option.
 | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)).
**Datatype:** Positive integer. | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models.
**Datatype:** Boolean.
Default: `False`. | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-running.md b/docs/freqai-running.md index d2f9595be..4c90a4885 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -81,7 +81,10 @@ To save the models generated during a particular backtest so that you can start ### Backtest live models -FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry/run for comparison or other study. For that, you must set `"purge_old_models"` to `False` in the config. +FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. For that, you have 2 options: + +1. Set `"save_live_data_backtest"` to `True` in the config. With this option, FreqAI will save the live dataframe for reuse in backtesting. This option requires less disk space and backtesting will run faster. +2. Set `"purge_old_models"` to `False` and `"save_live_data_backtest"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time. The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models. diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index ed78cfee5..d93060568 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1541,14 +1541,16 @@ class FreqaiDataKitchen: if self.backtesting_live_model_path.is_file(): saved_dataframe = self.get_backtesting_live_dataframe() concat_dataframe = pd.concat([saved_dataframe, last_row_df]) - concat_dataframe.reset_index(drop=True).to_feather( - self.backtesting_live_model_path, compression_level=9, compression='lz4') + self.save_backtesting_live_dataframe_to_feather(concat_dataframe) else: - last_row_df.reset_index(drop=True).to_feather( - self.backtesting_live_model_path, compression_level=9, compression='lz4') + self.save_backtesting_live_dataframe_to_feather(last_row_df) shutil.copy(self.backtesting_live_model_path, self.backtesting_live_model_bkp_path) + def save_backtesting_live_dataframe_to_feather(self, dataframe: DataFrame): + dataframe.reset_index(drop=True).to_feather( + self.backtesting_live_model_path, compression_level=9, compression='lz4') + def get_backtesting_live_dataframe( self ) -> DataFrame: diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 8d84d70c5..a0dac5725 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -694,7 +694,8 @@ class IFreqaiModel(ABC): for label in full_labels: if self.dd.historic_predictions[dk.pair][label].dtype == object: continue - f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles)) + f = spy.stats.norm.fit( + self.dd.historic_predictions[dk.pair][label].fillna(0).tail(num_candles)) dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1] return @@ -882,11 +883,7 @@ class IFreqaiModel(ABC): if index >= fit_live_predictions_candles: self.dd.historic_predictions[self.dk.pair] = ( dk.full_df.iloc[index - fit_live_predictions_candles:index]) - else: - self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index] - - self.fit_live_predictions(self.dk, self.dk.pair) - if index >= 
fit_live_predictions_candles: + self.fit_live_predictions(self.dk, self.dk.pair) for label in label_columns: if dk.full_df[label].dtype == object: continue @@ -899,6 +896,7 @@ class IFreqaiModel(ABC): for extra_col in self.dk.data["extra_returns_per_train"]: dk.full_df.at[index, f"{extra_col}"] = ( self.dk.data["extra_returns_per_train"][extra_col]) + return # Following methods which are overridden by user made prediction models. diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 9abe60edb..ca7c19c94 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -259,3 +259,47 @@ def test_get_full_model_path(mocker, freqai_conf, model): model_path = freqai.dk.get_full_models_path(freqai_conf) assert model_path.is_dir() is True + + +def test_save_backtesting_live_dataframe(mocker, freqai_conf): + freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) + dataframe_without_last_candle = dataframe.copy() + dataframe_without_last_candle.drop(dataframe.tail(1).index, inplace=True) + freqai_conf.update({"save_live_data_backtest": True}) + freqai.dk.save_backtesting_live_dataframe(dataframe_without_last_candle, "ADA/BTC") + saved_dataframe = freqai.dk.get_backtesting_live_dataframe() + assert len(saved_dataframe) == 1 + assert saved_dataframe.iloc[-1, 0] == dataframe_without_last_candle.iloc[-1, 0] + freqai.dk.save_backtesting_live_dataframe(dataframe, "ADA/BTC") + saved_dataframe = freqai.dk.get_backtesting_live_dataframe() + assert len(saved_dataframe) == 2 + assert saved_dataframe.iloc[-1, 0] == dataframe.iloc[-1, 0] + assert saved_dataframe.iloc[-2, 0] == dataframe.iloc[-2, 0] + + +def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): + freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) + freqai_conf.update({"save_live_data_backtest": True}) + freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") + freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe) + timerange = freqai.dk.get_timerange_from_backtesting_live_dataframe() + assert timerange.startts == 1516406400 + assert timerange.stopts == 1517356500 + + +def test_get_timerange_from_backtesting_live_dataframe_folder_not_found(mocker, freqai_conf): + freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) + with pytest.raises( + OperationalException, + match=r'Saved live data not found.*' + ): + freqai.dk.get_timerange_from_backtesting_live_dataframe() + + +def test_saved_live_bt_file_not_found(mocker, freqai_conf): + freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) + with pytest.raises( + OperationalException, + match=r'.*live backtesting dataframe file not found.*' + ): + freqai.dk.get_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 25bc99580..ed634de55 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -300,6 +300,61 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): shutil.rmtree(Path(freqai.dk.full_path)) +def test_start_backtesting_from_saved_live_dataframe(mocker, freqai_conf, caplog): + freqai_conf.update({"save_live_data_backtest": True}) + freqai_conf.update({"freqai_backtest_live_models": True}) + + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + strategy.freqai_info = freqai_conf.get("freqai", {}) + 
freqai = strategy.freqai + freqai.live = False + freqai.dk = FreqaiDataKitchen(freqai_conf) + timerange = TimeRange.parse_timerange("20180110-20180130") + freqai.dd.load_all_pair_histories(timerange, freqai.dk) + sub_timerange = TimeRange.parse_timerange("20180110-20180130") + corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) + df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") + metadata = {"pair": "ADA/BTC"} + + # create a dummy live dataframe file with 10 rows + dataframe_predictions = df.tail(10).copy() + dataframe_predictions["&s_close"] = dataframe_predictions["close"] * 1.1 + freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") + freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe_predictions) + + freqai.start_backtesting_from_live_saved_files(df, metadata, freqai.dk) + assert len(freqai.dk.return_dataframe) == len(df) + assert len(freqai.dk.return_dataframe[freqai.dk.return_dataframe["&s_close"] > 0]) == ( + len(dataframe_predictions)) + shutil.rmtree(Path(freqai.dk.full_path)) + + +def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): + freqai_conf.get("freqai", {}).update({"fit_live_predictions_candles": 10}) + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + strategy.freqai_info = freqai_conf.get("freqai", {}) + freqai = strategy.freqai + freqai.live = False + freqai.dk = FreqaiDataKitchen(freqai_conf) + timerange = TimeRange.parse_timerange("20180128-20180130") + freqai.dd.load_all_pair_histories(timerange, freqai.dk) + sub_timerange = TimeRange.parse_timerange("20180129-20180130") + corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) + df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") + freqai.dk.pair = "ADA/BTC" + freqai.dk.full_df = df + assert "&-s_close_mean" not in freqai.dk.full_df.columns + assert "&-s_close_std" not in freqai.dk.full_df.columns + freqai.backtesting_fit_live_predictions(freqai.dk) + assert "&-s_close_mean" in freqai.dk.full_df.columns + assert "&-s_close_std" in freqai.dk.full_df.columns + shutil.rmtree(Path(freqai.dk.full_path)) + + def test_follow_mode(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) From 60fcd8dce22024ea5cff3b48a5b17ff33bfc723e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 17 Nov 2022 21:50:02 +0100 Subject: [PATCH 158/421] fix skipped mac test, fix RL bug in add_state_info, fix use of __import__, revise doc --- docs/freqai-reinforcement-learning.md | 2 +- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 6 +++--- tests/freqai/test_freqai_interface.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index c4e70130b..b96c591de 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -20,7 +20,7 @@ With the current framework, we aim to expose the training environment via the co We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. 
Meanwhile, we enable advanced users to create their own model classes entirely. -The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework (albeit with some basic assistance from core-dev). For the environment, it is inheriting from `gym.env` which means that a user would need to write an entirely new environment if they wish to switch to a different library. +The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. ## Running Reinforcement Learning diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index d0ddce294..629633814 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -1,3 +1,4 @@ +import importlib import logging from abc import abstractmethod from datetime import datetime, timezone @@ -58,8 +59,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): f'sb3_contrib. please choose one of {SB3_MODELS} or ' f'{SB3_CONTRIB_MODELS}') - mod = __import__(import_str, fromlist=[ - self.model_type]) + mod = importlib.import_module(import_str, self.model_type) self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() @@ -236,7 +236,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def _predict(window): observations = dataframe.iloc[window.index] - if self.live and self.rl_config('add_state_info', False): + if self.live and self.rl_config.get('add_state_info', False): market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations['current_profit_pct'] = current_profit observations['position'] = market_side diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 08f33add9..3415c75ca 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -196,7 +196,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") - if is_mac(): + if is_mac() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") Trade.use_db = False From 61a859ba4c8462c4ae7785063f6ed1014e598764 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 18 Nov 2022 17:30:03 +0100 Subject: [PATCH 159/421] remove tensorboard req from rl reqs --- requirements-freqai-rl.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index 22e077241..b6bd7ef15 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -6,4 +6,3 @@ torch==1.12.1 stable-baselines3==1.6.1 gym==0.21 sb3-contrib==1.6.1 -tensorboard==2.10.1 From 0cb6f71c026bd2f771a862c43c5b2c744a64264e Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 18 Nov 2022 13:32:27 -0700 Subject: [PATCH 160/421] better error handling, true async sending, more readable api --- 
freqtrade/rpc/api_server/api_ws.py | 66 +++----------- freqtrade/rpc/api_server/webserver.py | 1 + freqtrade/rpc/api_server/ws/channel.py | 89 +++++++++++++++---- freqtrade/rpc/api_server/ws/message_stream.py | 3 +- 4 files changed, 88 insertions(+), 71 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 01243b0cc..2454646ea 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -1,16 +1,14 @@ -import asyncio import logging from typing import Any, Dict from fastapi import APIRouter, Depends -from fastapi.websockets import WebSocket, WebSocketDisconnect +from fastapi.websockets import WebSocket from pydantic import ValidationError -from websockets.exceptions import ConnectionClosed from freqtrade.enums import RPCMessageType, RPCRequestType from freqtrade.rpc.api_server.api_auth import validate_ws_token from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc -from freqtrade.rpc.api_server.ws import WebSocketChannel +from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel from freqtrade.rpc.api_server.ws.message_stream import MessageStream from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema, WSRequestSchema, WSWhitelistMessage) @@ -23,45 +21,20 @@ logger = logging.getLogger(__name__) router = APIRouter() -class WebSocketChannelClosed(Exception): - """ - General WebSocket exception to signal closing the channel - """ - pass - - async def channel_reader(channel: WebSocketChannel, rpc: RPC): """ Iterate over the messages from the channel and process the request """ - try: - async for message in channel: - await _process_consumer_request(message, channel, rpc) - except ( - RuntimeError, - WebSocketDisconnect, - ConnectionClosed - ): - raise WebSocketChannelClosed - except asyncio.CancelledError: - return + async for message in channel: + await _process_consumer_request(message, channel, rpc) async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream): """ Iterate over messages in the message stream and send them """ - try: - async for message in message_stream: - await channel.send(message) - except ( - RuntimeError, - WebSocketDisconnect, - ConnectionClosed - ): - raise WebSocketChannelClosed - except asyncio.CancelledError: - return + async for message in message_stream: + await channel.send(message) async def _process_consumer_request( @@ -103,15 +76,11 @@ async def _process_consumer_request( # Format response response = WSWhitelistMessage(data=whitelist) - # Send it back await channel.send(response.dict(exclude_none=True)) elif type == RPCRequestType.ANALYZED_DF: - limit = None - - if data: - # Limit the amount of candles per dataframe to 'limit' or 1500 - limit = max(data.get('limit', 1500), 1500) + # Limit the amount of candles per dataframe to 'limit' or 1500 + limit = min(data.get('limit', 1500), 1500) if data else None # For every pair in the generator, send a separate message for message in rpc._ws_request_analyzed_df(limit): @@ -127,17 +96,8 @@ async def message_endpoint( rpc: RPC = Depends(get_rpc), message_stream: MessageStream = Depends(get_message_stream) ): - async with WebSocketChannel(websocket).connect() as channel: - try: - logger.info(f"Channel connected - {channel}") - - channel_tasks = asyncio.gather( - channel_reader(channel, rpc), - channel_broadcaster(channel, message_stream) - ) - await channel_tasks - except WebSocketChannelClosed: - pass - finally: - logger.info(f"Channel 
disconnected - {channel}") - channel_tasks.cancel() + async with create_channel(websocket) as channel: + await channel.run_channel_tasks( + channel_reader(channel, rpc), + channel_broadcaster(channel, message_stream) + ) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index f100a46ef..4a9f089d1 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -94,6 +94,7 @@ class ApiServer(RPCHandler): del ApiServer._rpc if self._server and not self._standalone: logger.info("Stopping API Server") + # self._server.force_exit, self._server.should_exit = True, True self._server.cleanup() @classmethod diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 8e248d368..d4d4d6453 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -29,6 +29,7 @@ class WebSocketChannel: # Internal event to signify a closed websocket self._closed = asyncio.Event() + self._send_timeout_high_limit = 2 # The subscribed message types self._subscriptions: List[str] = [] @@ -36,6 +37,9 @@ class WebSocketChannel: # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) + # The async tasks created for the channel + self._channel_tasks: List[asyncio.Task] = [] + def __repr__(self): return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" @@ -51,7 +55,14 @@ class WebSocketChannel: """ Send a message on the wrapped websocket """ - await self._wrapped_ws.send(message) + + # Without this sleep, messages would send to one channel + # first then another after the first one finished. + # With the sleep call, it gives control to the event + # loop to schedule other channel send methods. + await asyncio.sleep(0) + + return await self._wrapped_ws.send(message) async def recv(self): """ @@ -77,7 +88,6 @@ class WebSocketChannel: """ self._closed.set() - self._relay_task.cancel() try: await self._websocket.close() @@ -106,23 +116,68 @@ class WebSocketChannel: """ return message_type in self._subscriptions + async def run_channel_tasks(self, *tasks, **kwargs): + """ + Create and await on the channel tasks unless an exception + was raised, then cancel them all. 
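As a rough standalone sketch of the behaviour described here (not code from the patch; the `reader`/`broadcaster` coroutines are invented stand-ins for the real channel tasks):

```python
import asyncio


async def run_tasks_cancelling_siblings(*coros):
    # Wrap the coroutines in tasks, await them together, and if any of them
    # raises, cancel the remaining ones before re-raising the error.
    tasks = [asyncio.create_task(coro) for coro in coros]
    try:
        await asyncio.gather(*tasks)
    except Exception:
        for task in tasks:
            task.cancel()
        await asyncio.wait(tasks)
        raise


async def reader():
    await asyncio.sleep(0.01)
    raise ConnectionError("client went away")  # simulated disconnect


async def broadcaster():
    while True:  # would normally keep sending messages forever
        await asyncio.sleep(0.01)


async def main():
    try:
        await run_tasks_cancelling_siblings(reader(), broadcaster())
    except ConnectionError as err:
        print(f"all channel tasks stopped: {err}")


asyncio.run(main())
```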
+ + :params *tasks: All coros or tasks to be run concurrently + :param **kwargs: Any extra kwargs to pass to gather + """ + + # Wrap the coros into tasks if they aren't already + self._channel_tasks = [ + task if isinstance(task, asyncio.Task) else asyncio.create_task(task) + for task in tasks + ] + + try: + await asyncio.gather(*self._channel_tasks, **kwargs) + except Exception: + # If an exception occurred, cancel the rest of the tasks and bubble up + # the error that was caught here + await self.cancel_channel_tasks() + raise + + async def cancel_channel_tasks(self): + """ + Cancel and wait on all channel tasks + """ + for task in self._channel_tasks: + task.cancel() + + # Wait for tasks to finish cancelling + try: + await asyncio.wait(self._channel_tasks) + except asyncio.CancelledError: + pass + + self._channel_tasks = [] + async def __aiter__(self): """ Generator for received messages """ - while True: - try: - yield await self.recv() - except Exception: - break + # We can not catch any errors here as websocket.recv is + # the first to catch any disconnects and bubble it up + # so the connection is garbage collected right away + while not self.is_closed(): + yield await self.recv() - @asynccontextmanager - async def connect(self): - """ - Context manager for safely opening and closing the websocket connection - """ - try: - await self.accept() - yield self - finally: - await self.close() + +@asynccontextmanager +async def create_channel(websocket: WebSocketType, **kwargs): + """ + Context manager for safely opening and closing a WebSocketChannel + """ + channel = WebSocketChannel(websocket, **kwargs) + try: + await channel.accept() + logger.info(f"Connected to channel - {channel}") + + yield channel + except Exception: + pass + finally: + await channel.close() + logger.info(f"Disconnected from channel - {channel}") diff --git a/freqtrade/rpc/api_server/ws/message_stream.py b/freqtrade/rpc/api_server/ws/message_stream.py index f77242719..9592908ab 100644 --- a/freqtrade/rpc/api_server/ws/message_stream.py +++ b/freqtrade/rpc/api_server/ws/message_stream.py @@ -17,7 +17,8 @@ class MessageStream: async def subscribe(self): waiter = self._waiter while True: - message, waiter = await waiter + # Shield the future from being cancelled by a task waiting on it + message, waiter = await asyncio.shield(waiter) yield message __aiter__ = subscribe From d02da279f8d76bcbd4042e473a1d8d199355b266 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 19 Nov 2022 13:20:20 +0100 Subject: [PATCH 161/421] document the simplifications of the training environment --- docs/freqai-reinforcement-learning.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index b96c591de..bd2b36463 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -23,6 +23,11 @@ We envision the majority of users focusing their effort on creative design of th The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. +### Important considerations + +As explained above, the agent is "trained" in an artificial trading "environment". 
In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. + + ## Running Reinforcement Learning Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: From 80d070e9eed2a05980818af817594c6cae0b0f9a Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Sat, 19 Nov 2022 14:15:58 -0300 Subject: [PATCH 162/421] update code to use historic_predictions for freqai_backtest_live_models --- docs/freqai-parameter-table.md | 2 +- docs/freqai-running.md | 4 +- freqtrade/freqai/data_drawer.py | 21 +++++ freqtrade/freqai/data_kitchen.py | 114 +++++------------------- freqtrade/freqai/freqai_interface.py | 53 +++++++---- freqtrade/freqai/utils.py | 2 +- tests/freqai/test_freqai_datakitchen.py | 33 +------ tests/freqai/test_freqai_interface.py | 31 ------- 8 files changed, 86 insertions(+), 174 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 2961b1b8d..059d56a1f 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -15,7 +15,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old.
**Datatype:** Positive integer.
Default: `0` (models never expire). | `purge_old_models` | Delete obsolete models.
**Datatype:** Boolean.
Default: `False` (all historic models remain on disk). | `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`.
**Datatype:** Boolean.
Default: `False` (no models are saved).
-| `save_live_data_backtest` | Save live dataframe during dry/live runs to reuse in backtesting with the [Backtest live models](freqai-running.md#backtest_live_models) option.
+| `backtest_using_historic_predictions` | Reuse `historic_predictions` in backtesting with the [Backtest live models](freqai-running.md#backtest_live_models) option.
Default: `True` | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)).
**Datatype:** Positive integer. | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models.
**Datatype:** Boolean.
Default: `False`. | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-running.md b/docs/freqai-running.md index 4c90a4885..d777b180e 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -83,8 +83,8 @@ To save the models generated during a particular backtest so that you can start FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. For that, you have 2 options: -1. Set `"save_live_data_backtest"` to `True` in the config. With this option, FreqAI will save the live dataframe for reuse in backtesting. This option requires less disk space and backtesting will run faster. -2. Set `"purge_old_models"` to `False` and `"save_live_data_backtest"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time. +1. Set `"backtest_using_historic_predictions"` to `True` in the config. With this option, FreqAI will reuse `historic_predictions` in backtesting. This option requires less disk space and backtesting will run faster. +2. Set `"purge_old_models"` to `False` and `"backtest_using_historic_predictions"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time. The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models. diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 038ddaf2e..e83b05aaa 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -81,6 +81,7 @@ class FreqaiDataDrawer: self.historic_predictions_bkp_path = Path( self.full_path / "historic_predictions.backup.pkl") self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json") + self.global_metadata_path = Path(self.full_path / "global_metadata.json") self.metric_tracker_path = Path(self.full_path / "metric_tracker.json") self.follow_mode = follow_mode if follow_mode: @@ -125,6 +126,17 @@ class FreqaiDataDrawer: self.update_metric_tracker('cpu_load5min', load5 / cpus, pair) self.update_metric_tracker('cpu_load15min', load15 / cpus, pair) + def load_global_metadata_from_disk(self): + """ + Locate and load a previously saved global metadata in present model folder. 
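For illustration, a standalone sketch of the save/load pair added here (not patch code; the folder name and timestamp value are invented, while the file name and the `start_dry_live_date` key mirror what this commit uses):

```python
import time
from pathlib import Path

import rapidjson

# Invented model folder; in FreqAI this would be the identifier's model path.
full_path = Path("user_data/models/example_identifier")
full_path.mkdir(parents=True, exist_ok=True)
global_metadata_path = full_path / "global_metadata.json"

# Saving: dump a plain dict, e.g. the timestamp at which the dry/live run started.
metadata = {"start_dry_live_date": int(time.time())}
with open(global_metadata_path, "w") as fp:
    rapidjson.dump(metadata, fp, number_mode=rapidjson.NM_NATIVE)

# Loading: read it back later, e.g. to anchor the backtest-live-models timerange.
with open(global_metadata_path, "r") as fp:
    loaded = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
print(loaded["start_dry_live_date"])
```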
+ """ + exists = self.global_metadata_path.is_file() + if exists: + with open(self.global_metadata_path, "r") as fp: + metatada_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE) + return metatada_dict + return {} + def load_drawer_from_disk(self): """ Locate and load a previously saved data drawer full of all pair model metadata in @@ -225,6 +237,15 @@ class FreqaiDataDrawer: rapidjson.dump(self.follower_dict, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE) + def save_global_metadata_to_disk(self, metadata: Dict[str, Any]): + """ + Save global metadata json to disk + """ + with self.save_lock: + with open(self.global_metadata_path, 'w') as fp: + rapidjson.dump(metadata, fp, default=self.np_encoder, + number_mode=rapidjson.NM_NATIVE) + def create_follower_dict(self): """ Create or dictionary for each follower to maintain unique persistent prediction targets diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index be2fb68b1..641c95725 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -from pandas import DataFrame, read_feather +from pandas import DataFrame from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -74,9 +74,6 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() - self.backtesting_live_model_folder_path = Path() - self.backtesting_live_model_path = Path() - self.backtesting_live_model_bkp_path = Path() self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -90,7 +87,9 @@ class FreqaiDataKitchen: self.full_path = self.get_full_models_path(self.config) if self.backtest_live_models: - if self.pair: + if self.pair and not ( + self.freqai_config.get("backtest_using_historic_predictions", True) + ): self.set_timerange_from_ready_models() (self.training_timeranges, self.backtesting_timeranges) = self.split_timerange_live_models() @@ -1488,101 +1487,30 @@ class FreqaiDataKitchen: return dataframe - def set_backtesting_live_dataframe_folder_path( - self - ) -> None: - """ - Set live backtesting dataframe path - :param pair: current pair - """ - self.backtesting_live_model_folder_path = Path( - self.full_path / self.backtest_predictions_folder / "live_data") - - def set_backtesting_live_dataframe_path( - self, pair: str - ) -> None: - """ - Set live backtesting dataframe path - :param pair: current pair - """ - self.set_backtesting_live_dataframe_folder_path() - if not self.backtesting_live_model_folder_path.is_dir(): - self.backtesting_live_model_folder_path.mkdir(parents=True, exist_ok=True) - - pair_path = pair.split(":")[0].replace("/", "_").lower() - file_name = f"live_backtesting_{pair_path}.feather" - self.backtesting_live_model_path = Path( - self.full_path / - self.backtesting_live_model_folder_path / - file_name) - self.backtesting_live_model_bkp_path = Path( - self.full_path / - self.backtesting_live_model_folder_path / - file_name.replace(".feather", ".backup.feather")) - - def save_backtesting_live_dataframe( - self, dataframe: DataFrame, pair: str - ) -> None: - """ - Save live backtesting dataframe to feather file format - :param dataframe: current live dataframe - :param pair: current pair - """ - self.set_backtesting_live_dataframe_path(pair) - last_row_df = dataframe.tail(1) - if 
self.backtesting_live_model_path.is_file(): - saved_dataframe = self.get_backtesting_live_dataframe() - concat_dataframe = pd.concat([saved_dataframe, last_row_df]) - self.save_backtesting_live_dataframe_to_feather(concat_dataframe) - else: - self.save_backtesting_live_dataframe_to_feather(last_row_df) - - shutil.copy(self.backtesting_live_model_path, self.backtesting_live_model_bkp_path) - - def save_backtesting_live_dataframe_to_feather(self, dataframe: DataFrame): - dataframe.reset_index(drop=True).to_feather( - self.backtesting_live_model_path, compression_level=9, compression='lz4') - - def get_backtesting_live_dataframe( - self - ) -> DataFrame: - """ - Get live backtesting dataframe from feather file format - return: saved dataframe from previous dry/run or live - """ - if self.backtesting_live_model_path.is_file(): - saved_dataframe = DataFrame() - try: - saved_dataframe = read_feather(self.backtesting_live_model_path) - except Exception: - saved_dataframe = read_feather(self.backtesting_live_model_bkp_path) - return saved_dataframe - else: - raise OperationalException( - "Saved live backtesting dataframe file not found." - ) - def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: """ - Returns timerange information based on live backtesting dataframe file + Returns timerange information based on historic predictions file :return: timerange calculated from saved live data """ - all_assets_start_dates = [] - all_assets_end_dates = [] - self.set_backtesting_live_dataframe_folder_path() - if not self.backtesting_live_model_folder_path.is_dir(): + from freqtrade.freqai.data_drawer import FreqaiDataDrawer + dd = FreqaiDataDrawer(Path(self.full_path), self.config) + if not dd.historic_predictions_path.is_file(): raise OperationalException( - 'Saved live data not found. Saved lived data is required ' + 'Historic predictions not found. 
Historic predictions data is required ' 'to run backtest with the freqai-backtest-live-models option ' - 'and save_live_data_backtest config option as true' + 'and backtest_using_historic_predictions config option as true' ) - for file_in_dir in self.backtesting_live_model_folder_path.iterdir(): - if file_in_dir.is_file() and "backup" not in file_in_dir.name: - saved_dataframe = read_feather(file_in_dir) - all_assets_start_dates.append(saved_dataframe.date.min()) - all_assets_end_dates.append(saved_dataframe.date.max()) - start_date = min(all_assets_start_dates) - end_date = max(all_assets_end_dates) + + dd.load_historic_predictions_from_disk() + + all_pairs_end_dates = [] + for pair in dd.historic_predictions: + pair_historic_data = dd.historic_predictions[pair] + all_pairs_end_dates.append(pair_historic_data.date_pred.max()) + + global_metadata = dd.load_global_metadata_from_disk() + start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"])) + end_date = max(all_pairs_end_dates) # add 1 day to string timerange to ensure BT module will load all dataframe data end_date = end_date + timedelta(days=1) backtesting_timerange = TimeRange( diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index c48758df4..473fe939f 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -53,6 +53,7 @@ class IFreqaiModel(ABC): def __init__(self, config: Config) -> None: self.config = config + self.metadata: Dict[str, Any] = {} self.assert_config(self.config) self.freqai_info: Dict[str, Any] = config["freqai"] self.data_split_parameters: Dict[str, Any] = config.get("freqai", {}).get( @@ -67,10 +68,10 @@ class IFreqaiModel(ABC): self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True) if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') - self.save_live_data_backtest: bool = self.freqai_info.get( - "save_live_data_backtest", False) - if self.save_live_data_backtest: - logger.info('Live configured to save data for backtest.') + self.backtest_using_historic_predictions: bool = self.freqai_info.get( + "backtest_using_historic_predictions", True) + if self.backtest_using_historic_predictions: + logger.info('Backtesting live models configured to use historic predictions.') self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) # set current candle to arbitrary historical date @@ -103,6 +104,7 @@ class IFreqaiModel(ABC): self.get_corr_dataframes: bool = True self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() + self.metadata = self.dd.load_global_metadata_from_disk() record_params(config, self.full_path) @@ -136,6 +138,7 @@ class IFreqaiModel(ABC): self.inference_timer('start') self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) dk = self.start_live(dataframe, metadata, strategy, self.dk) + dataframe = dk.remove_features_from_df(dk.return_dataframe) # For backtesting, each pair enters and then gets trained for each window along the # sliding window defined by "train_period_days" (training window) and "live_retrain_hours" @@ -145,14 +148,19 @@ class IFreqaiModel(ABC): elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) if self.dk.backtest_live_models: - logger.info( - f"Backtesting {len(self.dk.backtesting_timeranges)} timeranges (live models)") + if self.backtest_using_historic_predictions: + logger.info( + "Backtesting using historic 
predictions (live models)") + else: + logger.info( + f"Backtesting {len(self.dk.backtesting_timeranges)} " + "timeranges (live models)") else: logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dataframe = self.dk.use_strategy_to_populate_indicators( strategy, prediction_dataframe=dataframe, pair=metadata["pair"] ) - if not self.save_live_data_backtest: + if not self.backtest_using_historic_predictions: dk = self.start_backtesting(dataframe, metadata, self.dk) dataframe = dk.remove_features_from_df(dk.return_dataframe) else: @@ -163,8 +171,7 @@ class IFreqaiModel(ABC): self.clean_up() if self.live: self.inference_timer('stop', metadata["pair"]) - if self.save_live_data_backtest: - dk.save_backtesting_live_dataframe(dataframe, metadata["pair"]) + self.set_start_dry_live_date(dataframe) return dataframe @@ -335,14 +342,12 @@ class IFreqaiModel(ABC): """ pair = metadata["pair"] dk.return_dataframe = dataframe - self.dk.set_backtesting_live_dataframe_path(pair) - saved_dataframe = self.dk.get_backtesting_live_dataframe() - columns_to_drop = list(set(dk.return_dataframe.columns).difference( - ["date", "open", "high", "low", "close", "volume"])) - saved_dataframe = saved_dataframe.drop( - columns=["open", "high", "low", "close", "volume"]) + saved_dataframe = self.dd.historic_predictions[pair] + columns_to_drop = list(set(saved_dataframe.columns).intersection( + dk.return_dataframe.columns)) dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop)) - dk.return_dataframe = pd.merge(dk.return_dataframe, saved_dataframe, how='left', on='date') + dk.return_dataframe = pd.merge( + dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred") # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) return dk @@ -886,6 +891,22 @@ class IFreqaiModel(ABC): return + def update_metadata(self, metadata: Dict[str, Any]): + """ + Update global metadata and save the updated json file + :param metadata: new global metadata dict + """ + self.dd.save_global_metadata_to_disk(metadata) + self.metadata = metadata + + def set_start_dry_live_date(self, live_dataframe: DataFrame): + key_name = "start_dry_live_date" + if key_name not in self.metadata: + metadata = self.metadata + metadata[key_name] = int( + pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp()) + self.update_metadata(metadata) + # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. 
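For readers skimming the hunk above, here is a rough, self-contained sketch (not part of the patch) of what the historic-predictions merge does: only the column names `date`, `date_pred` and `&s_close` come from the diff; the pair data, candle dates and prediction values are invented for illustration.

```python
# Hypothetical toy data illustrating the left-merge of saved historic
# predictions onto the strategy dataframe (column names follow the hunk
# above; all values are made up for the example).
import pandas as pd

return_dataframe = pd.DataFrame({
    "date": pd.to_datetime(["2018-01-28 00:00", "2018-01-28 00:05"], utc=True),
    "close": [0.055, 0.056],
})
historic_predictions = pd.DataFrame({
    "date_pred": pd.to_datetime(["2018-01-28 00:05"], utc=True),
    "&s_close": [0.061],
})

# Drop any columns the two frames share, then attach predictions by candle date.
overlap = set(historic_predictions.columns).intersection(return_dataframe.columns)
merged = pd.merge(
    return_dataframe.drop(columns=list(overlap)),
    historic_predictions,
    how="left", left_on="date", right_on="date_pred",
)
print(merged)  # candles without a saved prediction simply carry NaN values
```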
diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index c9efe6a3c..f42a87be7 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -230,7 +230,7 @@ def get_timerange_backtest_live_models(config: Config) -> str: dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) timerange: TimeRange = TimeRange() - if not config.get("save_live_data_backtest", False): + if not config.get("freqai", {}).get("backtest_using_historic_predictions", True): timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) else: timerange = dk.get_timerange_from_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index ca7c19c94..2dbbd7ef5 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -261,45 +261,18 @@ def test_get_full_model_path(mocker, freqai_conf, model): assert model_path.is_dir() is True -def test_save_backtesting_live_dataframe(mocker, freqai_conf): - freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) - dataframe_without_last_candle = dataframe.copy() - dataframe_without_last_candle.drop(dataframe.tail(1).index, inplace=True) - freqai_conf.update({"save_live_data_backtest": True}) - freqai.dk.save_backtesting_live_dataframe(dataframe_without_last_candle, "ADA/BTC") - saved_dataframe = freqai.dk.get_backtesting_live_dataframe() - assert len(saved_dataframe) == 1 - assert saved_dataframe.iloc[-1, 0] == dataframe_without_last_candle.iloc[-1, 0] - freqai.dk.save_backtesting_live_dataframe(dataframe, "ADA/BTC") - saved_dataframe = freqai.dk.get_backtesting_live_dataframe() - assert len(saved_dataframe) == 2 - assert saved_dataframe.iloc[-1, 0] == dataframe.iloc[-1, 0] - assert saved_dataframe.iloc[-2, 0] == dataframe.iloc[-2, 0] - - def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) - freqai_conf.update({"save_live_data_backtest": True}) - freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") - freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe) + freqai_conf.update({"backtest_using_historic_predictions": True}) timerange = freqai.dk.get_timerange_from_backtesting_live_dataframe() assert timerange.startts == 1516406400 assert timerange.stopts == 1517356500 -def test_get_timerange_from_backtesting_live_dataframe_folder_not_found(mocker, freqai_conf): +def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_conf): freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) with pytest.raises( OperationalException, - match=r'Saved live data not found.*' + match=r'Historic predictions not found.*' ): freqai.dk.get_timerange_from_backtesting_live_dataframe() - - -def test_saved_live_bt_file_not_found(mocker, freqai_conf): - freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) - with pytest.raises( - OperationalException, - match=r'.*live backtesting dataframe file not found.*' - ): - freqai.dk.get_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index ed634de55..66b3bac17 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -300,37 +300,6 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): shutil.rmtree(Path(freqai.dk.full_path)) -def test_start_backtesting_from_saved_live_dataframe(mocker, freqai_conf, caplog): - 
freqai_conf.update({"save_live_data_backtest": True}) - freqai_conf.update({"freqai_backtest_live_models": True}) - - strategy = get_patched_freqai_strategy(mocker, freqai_conf) - exchange = get_patched_exchange(mocker, freqai_conf) - strategy.dp = DataProvider(freqai_conf, exchange) - strategy.freqai_info = freqai_conf.get("freqai", {}) - freqai = strategy.freqai - freqai.live = False - freqai.dk = FreqaiDataKitchen(freqai_conf) - timerange = TimeRange.parse_timerange("20180110-20180130") - freqai.dd.load_all_pair_histories(timerange, freqai.dk) - sub_timerange = TimeRange.parse_timerange("20180110-20180130") - corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) - df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") - metadata = {"pair": "ADA/BTC"} - - # create a dummy live dataframe file with 10 rows - dataframe_predictions = df.tail(10).copy() - dataframe_predictions["&s_close"] = dataframe_predictions["close"] * 1.1 - freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") - freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe_predictions) - - freqai.start_backtesting_from_live_saved_files(df, metadata, freqai.dk) - assert len(freqai.dk.return_dataframe) == len(df) - assert len(freqai.dk.return_dataframe[freqai.dk.return_dataframe["&s_close"] > 0]) == ( - len(dataframe_predictions)) - shutil.rmtree(Path(freqai.dk.full_path)) - - def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): freqai_conf.get("freqai", {}).update({"fit_live_predictions_candles": 10}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) From c1a73a551225424591891c8bb15491de85a79a36 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sat, 19 Nov 2022 13:21:26 -0700 Subject: [PATCH 163/421] move sleep call in send, minor cleanup --- freqtrade/rpc/api_server/ws/channel.py | 20 +++++++++----------- freqtrade/rpc/api_server/ws/serializer.py | 1 - 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index d4d4d6453..7a1191d62 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -55,14 +55,16 @@ class WebSocketChannel: """ Send a message on the wrapped websocket """ + await self._wrapped_ws.send(message) # Without this sleep, messages would send to one channel - # first then another after the first one finished. + # first then another after the first one finished and prevent + # any normal Rest API calls from processing at the same time. # With the sleep call, it gives control to the event - # loop to schedule other channel send methods. - await asyncio.sleep(0) - - return await self._wrapped_ws.send(message) + # loop to schedule other channel send methods, and helps + # throttle how fast we send. 
+ # 0.01 = 100 messages/second max throughput + await asyncio.sleep(0.01) async def recv(self): """ @@ -132,12 +134,10 @@ class WebSocketChannel: ] try: - await asyncio.gather(*self._channel_tasks, **kwargs) + return await asyncio.gather(*self._channel_tasks, **kwargs) except Exception: - # If an exception occurred, cancel the rest of the tasks and bubble up - # the error that was caught here + # If an exception occurred, cancel the rest of the tasks await self.cancel_channel_tasks() - raise async def cancel_channel_tasks(self): """ @@ -176,8 +176,6 @@ async def create_channel(websocket: WebSocketType, **kwargs): logger.info(f"Connected to channel - {channel}") yield channel - except Exception: - pass finally: await channel.close() logger.info(f"Disconnected from channel - {channel}") diff --git a/freqtrade/rpc/api_server/ws/serializer.py b/freqtrade/rpc/api_server/ws/serializer.py index 625a0990c..9a894e1bf 100644 --- a/freqtrade/rpc/api_server/ws/serializer.py +++ b/freqtrade/rpc/api_server/ws/serializer.py @@ -31,7 +31,6 @@ class WebSocketSerializer(ABC): async def recv(self) -> bytes: data = await self._websocket.recv() - return self._deserialize(data) From 3714d7074b91b9f0219e9fbac9c3effed9b4aecd Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sat, 19 Nov 2022 13:29:23 -0700 Subject: [PATCH 164/421] smaller throttle in channel send --- freqtrade/rpc/api_server/ws/channel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 7a1191d62..80b2ec220 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -63,8 +63,8 @@ class WebSocketChannel: # With the sleep call, it gives control to the event # loop to schedule other channel send methods, and helps # throttle how fast we send. - # 0.01 = 100 messages/second max throughput - await asyncio.sleep(0.01) + # 0.005 = 200 messages/second max throughput + await asyncio.sleep(0.005) async def recv(self): """ From fdc82af883d4e6601ab7468e73a748ecc2d11fd0 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Sat, 19 Nov 2022 22:27:58 -0300 Subject: [PATCH 165/421] fix tests - update code to backtest with historic_predictions --- freqtrade/freqai/data_drawer.py | 31 ++++++++++++++++++++- freqtrade/freqai/data_kitchen.py | 31 --------------------- freqtrade/freqai/utils.py | 4 ++- tests/freqai/test_freqai_backtesting.py | 2 ++ tests/freqai/test_freqai_datadrawer.py | 37 +++++++++++++++++++++++++ tests/freqai/test_freqai_datakitchen.py | 18 +----------- 6 files changed, 73 insertions(+), 50 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index e83b05aaa..59b8e2684 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -3,7 +3,7 @@ import logging import re import shutil import threading -from datetime import datetime, timezone +from datetime import datetime, timedelta, timezone from pathlib import Path from typing import Any, Dict, Tuple, TypedDict @@ -714,3 +714,32 @@ class FreqaiDataDrawer: ).reset_index(drop=True) return corr_dataframes, base_dataframes + + def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: + """ + Returns timerange information based on historic predictions file + :return: timerange calculated from saved live data + """ + if not self.historic_predictions_path.is_file(): + raise OperationalException( + 'Historic predictions not found. 
Historic predictions data is required ' + 'to run backtest with the freqai-backtest-live-models option ' + 'and backtest_using_historic_predictions config option as true' + ) + + self.load_historic_predictions_from_disk() + + all_pairs_end_dates = [] + for pair in self.historic_predictions: + pair_historic_data = self.historic_predictions[pair] + all_pairs_end_dates.append(pair_historic_data.date_pred.max()) + + global_metadata = self.load_global_metadata_from_disk() + start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"])) + end_date = max(all_pairs_end_dates) + # add 1 day to string timerange to ensure BT module will load all dataframe data + end_date = end_date + timedelta(days=1) + backtesting_timerange = TimeRange( + 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) + ) + return backtesting_timerange diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 641c95725..b364f4e7e 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1486,34 +1486,3 @@ class FreqaiDataKitchen: dataframe.columns = dataframe.columns.str.replace(c, "") return dataframe - - def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: - """ - Returns timerange information based on historic predictions file - :return: timerange calculated from saved live data - """ - from freqtrade.freqai.data_drawer import FreqaiDataDrawer - dd = FreqaiDataDrawer(Path(self.full_path), self.config) - if not dd.historic_predictions_path.is_file(): - raise OperationalException( - 'Historic predictions not found. Historic predictions data is required ' - 'to run backtest with the freqai-backtest-live-models option ' - 'and backtest_using_historic_predictions config option as true' - ) - - dd.load_historic_predictions_from_disk() - - all_pairs_end_dates = [] - for pair in dd.historic_predictions: - pair_historic_data = dd.historic_predictions[pair] - all_pairs_end_dates.append(pair_historic_data.date_pred.max()) - - global_metadata = dd.load_global_metadata_from_disk() - start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"])) - end_date = max(all_pairs_end_dates) - # add 1 day to string timerange to ensure BT module will load all dataframe data - end_date = end_date + timedelta(days=1) - backtesting_timerange = TimeRange( - 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) - ) - return backtesting_timerange diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index f42a87be7..fd5d448bd 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -14,6 +14,7 @@ from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data from freqtrade.exceptions import OperationalException from freqtrade.exchange import timeframe_to_seconds from freqtrade.exchange.exchange import market_is_active +from freqtrade.freqai.data_drawer import FreqaiDataDrawer from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist @@ -233,6 +234,7 @@ def get_timerange_backtest_live_models(config: Config) -> str: if not config.get("freqai", {}).get("backtest_using_historic_predictions", True): timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) else: - timerange = dk.get_timerange_from_backtesting_live_dataframe() + dd = FreqaiDataDrawer(models_path, config) + timerange = dd.get_timerange_from_backtesting_live_dataframe() return timerange.timerange_str 
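Since `get_timerange_backtest_live_models()` above ultimately just turns the saved global metadata and prediction dates into a `TimeRange`, the following is a small hedged sketch of that arithmetic (not part of the patch): the pair name, timestamps and dates are invented; the start comes from the stored `start_dry_live_date`, the stop from the newest `date_pred` plus one day.

```python
# Rough sketch of the timerange arithmetic used when backtesting from
# historic predictions: start = saved "start_dry_live_date", stop = newest
# prediction date + 1 day. All values below are made up for the example.
from datetime import datetime, timedelta, timezone
import pandas as pd

global_metadata = {"start_dry_live_date": 1516406400}  # saved when dry/live started
historic_predictions = {
    "ADA/USDT": pd.DataFrame(
        {"date_pred": pd.to_datetime(["2018-01-30 23:55"], utc=True)}),
}

end_date = max(df["date_pred"].max() for df in historic_predictions.values()) + timedelta(days=1)
start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"]), tz=timezone.utc)
timerange_str = f"{start_date:%Y%m%d}-{end_date:%Y%m%d}"  # e.g. '20180120-20180131'
print(timerange_str)
```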
diff --git a/tests/freqai/test_freqai_backtesting.py b/tests/freqai/test_freqai_backtesting.py index b9e2d650a..49b27f724 100644 --- a/tests/freqai/test_freqai_backtesting.py +++ b/tests/freqai/test_freqai_backtesting.py @@ -65,6 +65,8 @@ def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testda mocker.patch('freqtrade.optimize.backtesting.history.load_data') mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now)) freqai_conf["timerange"] = "" + freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False}) + patched_configuration_load_config_file(mocker, freqai_conf) args = [ diff --git a/tests/freqai/test_freqai_datadrawer.py b/tests/freqai/test_freqai_datadrawer.py index 7ab963507..3abf84586 100644 --- a/tests/freqai/test_freqai_datadrawer.py +++ b/tests/freqai/test_freqai_datadrawer.py @@ -2,8 +2,11 @@ import shutil from pathlib import Path +import pytest + from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider +from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from tests.conftest import get_patched_exchange from tests.freqai.conftest import get_patched_freqai_strategy @@ -93,3 +96,37 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf): assert len(df.columns) == 33 shutil.rmtree(Path(freqai.dk.full_path)) + + +def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + freqai = strategy.freqai + freqai.live = True + freqai.dk = FreqaiDataKitchen(freqai_conf) + timerange = TimeRange.parse_timerange("20180126-20180130") + freqai.dd.load_all_pair_histories(timerange, freqai.dk) + sub_timerange = TimeRange.parse_timerange("20180128-20180130") + _, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "ADA/BTC", freqai.dk) + base_df["5m"]["date_pred"] = base_df["5m"]["date"] + freqai.dd.historic_predictions = {} + freqai.dd.historic_predictions["ADA/USDT"] = base_df["5m"] + freqai.dd.save_historic_predictions_to_disk() + freqai.dd.save_global_metadata_to_disk({"start_dry_live_date": 1516406400}) + + timerange = freqai.dd.get_timerange_from_backtesting_live_dataframe() + assert timerange.startts == 1516406400 + assert timerange.stopts == 1517356500 + + +def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_conf): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + freqai = strategy.freqai + with pytest.raises( + OperationalException, + match=r'Historic predictions not found.*' + ): + freqai.dd.get_timerange_from_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 2dbbd7ef5..4dfc75d38 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -190,6 +190,7 @@ def test_get_timerange_from_ready_models(mocker, freqai_conf, model): freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_test_strat"}) + freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = 
get_patched_exchange(mocker, freqai_conf) @@ -259,20 +260,3 @@ def test_get_full_model_path(mocker, freqai_conf, model): model_path = freqai.dk.get_full_models_path(freqai_conf) assert model_path.is_dir() is True - - -def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): - freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) - freqai_conf.update({"backtest_using_historic_predictions": True}) - timerange = freqai.dk.get_timerange_from_backtesting_live_dataframe() - assert timerange.startts == 1516406400 - assert timerange.stopts == 1517356500 - - -def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_conf): - freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) - with pytest.raises( - OperationalException, - match=r'Historic predictions not found.*' - ): - freqai.dk.get_timerange_from_backtesting_live_dataframe() From 60a167bdefac8ba1cdf5224aee00dfdc26145020 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 14:09:45 -0700 Subject: [PATCH 166/421] add dynamic send timeout --- freqtrade/rpc/api_server/api_ws.py | 2 +- freqtrade/rpc/api_server/ws/channel.py | 65 +++++++++++++++++++------- 2 files changed, 50 insertions(+), 17 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 2454646ea..618490ec8 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -34,7 +34,7 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message Iterate over messages in the message stream and send them """ async for message in message_stream: - await channel.send(message) + await channel.send(message, timeout=True) async def _process_consumer_request( diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 80b2ec220..5424d7440 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -1,7 +1,9 @@ import asyncio import logging +import time +from collections import deque from contextlib import asynccontextmanager -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy @@ -29,7 +31,13 @@ class WebSocketChannel: # Internal event to signify a closed websocket self._closed = asyncio.Event() - self._send_timeout_high_limit = 2 + # The async tasks created for the channel + self._channel_tasks: List[asyncio.Task] = [] + + # Deque for average send times + self._send_times: Deque[float] = deque([], maxlen=10) + # High limit defaults to 3 to start + self._send_high_limit = 3 # The subscribed message types self._subscriptions: List[str] = [] @@ -37,9 +45,6 @@ class WebSocketChannel: # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) - # The async tasks created for the channel - self._channel_tasks: List[asyncio.Task] = [] - def __repr__(self): return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" @@ -51,20 +56,48 @@ class WebSocketChannel: def remote_addr(self): return self._websocket.remote_addr - async def send(self, message: Union[WSMessageSchemaType, Dict[str, Any]]): + def _calc_send_limit(self): """ - Send a message on the wrapped websocket + Calculate the send high limit for this channel """ - await self._wrapped_ws.send(message) - # Without this sleep, messages would send to one channel - # first then another after the first one finished 
and prevent - # any normal Rest API calls from processing at the same time. - # With the sleep call, it gives control to the event - # loop to schedule other channel send methods, and helps - # throttle how fast we send. - # 0.005 = 200 messages/second max throughput - await asyncio.sleep(0.005) + # Only update if we have enough data + if len(self._send_times) == self._send_times.maxlen: + # At least 1s or twice the average of send times + self._send_high_limit = max( + (sum(self._send_times) / len(self._send_times)) * 2, + 1 + ) + + async def send( + self, + message: Union[WSMessageSchemaType, Dict[str, Any]], + timeout: bool = False + ): + """ + Send a message on the wrapped websocket. If the sending + takes too long, it will raise a TimeoutError and + disconnect the connection. + + :param message: The message to send + :param timeout: Enforce send high limit, defaults to False + """ + try: + _ = time.time() + # If the send times out, it will raise + # a TimeoutError and bubble up to the + # message_endpoint to close the connection + await asyncio.wait_for( + self._wrapped_ws.send(message), + timeout=self._send_high_limit if timeout else None + ) + total_time = time.time() - _ + self._send_times.append(total_time) + + self._calc_send_limit() + except asyncio.TimeoutError: + logger.info(f"Connection for {self} is too far behind, disconnecting") + raise async def recv(self): """ From 48a1f2418ffb89c148e3417f65545ec7248a6faf Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 16:18:24 -0700 Subject: [PATCH 167/421] update typing, remove unneeded try block, readd sleep --- freqtrade/rpc/api_server/ws/channel.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 5424d7440..4bd7b0e4b 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -3,7 +3,7 @@ import logging import time from collections import deque from contextlib import asynccontextmanager -from typing import Any, Deque, Dict, List, Optional, Type, Union +from typing import Any, AsyncGenerator, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy @@ -99,6 +99,15 @@ class WebSocketChannel: logger.info(f"Connection for {self} is too far behind, disconnecting") raise + # Without this sleep, messages would send to one channel + # first then another after the first one finished and prevent + # any normal Rest API calls from processing at the same time. + # With the sleep call, it gives control to the event + # loop to schedule other channel send methods, and helps + # throttle how fast we send. 
+ # 0.01 = 100 messages/second max throughput + await asyncio.sleep(0.01) + async def recv(self): """ Receive a message on the wrapped websocket @@ -180,10 +189,7 @@ class WebSocketChannel: task.cancel() # Wait for tasks to finish cancelling - try: - await asyncio.wait(self._channel_tasks) - except asyncio.CancelledError: - pass + await asyncio.wait(self._channel_tasks) self._channel_tasks = [] @@ -199,7 +205,10 @@ class WebSocketChannel: @asynccontextmanager -async def create_channel(websocket: WebSocketType, **kwargs): +async def create_channel( + websocket: WebSocketType, + **kwargs +) -> AsyncGenerator[WebSocketChannel, None]: """ Context manager for safely opening and closing a WebSocketChannel """ From d2870d48ea8e7d19782f6a2c753ea622c16d36ae Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 16:24:44 -0700 Subject: [PATCH 168/421] change typing to async iterator --- freqtrade/rpc/api_server/ws/channel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 4bd7b0e4b..8699de66c 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -3,7 +3,7 @@ import logging import time from collections import deque from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Deque, Dict, List, Optional, Type, Union +from typing import Any, AsyncIterator, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy @@ -208,7 +208,7 @@ class WebSocketChannel: async def create_channel( websocket: WebSocketType, **kwargs -) -> AsyncGenerator[WebSocketChannel, None]: +) -> AsyncIterator[WebSocketChannel]: """ Context manager for safely opening and closing a WebSocketChannel """ From d9d7df70bfcbc2094ed51518438b238254d193f6 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 21 Nov 2022 12:21:40 -0700 Subject: [PATCH 169/421] fix tests, log unknown errors --- freqtrade/rpc/api_server/webserver.py | 1 - freqtrade/rpc/api_server/ws/channel.py | 14 ++++++++++- tests/rpc/test_rpc_apiserver.py | 34 ++++++++++++-------------- 3 files changed, 29 insertions(+), 20 deletions(-) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index 4a9f089d1..e4eb3895d 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -212,7 +212,6 @@ class ApiServer(RPCHandler): if self._standalone: self._server.run() else: - # self.start_message_queue() self._server.run_in_thread() except Exception: logger.exception("Api server failed to start.") diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 8699de66c..9dea21f3b 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -6,6 +6,9 @@ from contextlib import asynccontextmanager from typing import Any, AsyncIterator, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 +from fastapi import WebSocketDisconnect +from websockets.exceptions import ConnectionClosed + from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy from freqtrade.rpc.api_server.ws.serializer import (HybridJSONWebSocketSerializer, WebSocketSerializer) @@ -189,7 +192,16 @@ class WebSocketChannel: task.cancel() # Wait for tasks to finish cancelling - await asyncio.wait(self._channel_tasks) + try: + await task + except ( + asyncio.CancelledError, + WebSocketDisconnect, + 
ConnectionClosed + ): + pass + except Exception as e: + logger.info(f"Encountered unknown exception: {e}", exc_info=e) self._channel_tasks = [] diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 969728b6f..25d6a32e3 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -57,7 +57,10 @@ def botclient(default_conf, mocker): try: apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(rpc) - yield ftbot, TestClient(apiserver.app) + # We need to use the TestClient as a context manager to + # handle lifespan events correctly + with TestClient(apiserver.app) as client: + yield ftbot, client # Cleanup ... ? finally: if apiserver: @@ -438,7 +441,6 @@ def test_api_cleanup(default_conf, mocker, caplog): apiserver.cleanup() assert apiserver._server.cleanup.call_count == 1 assert log_has("Stopping API Server", caplog) - assert log_has("Stopping API Server background tasks", caplog) ApiServer.shutdown() @@ -1714,12 +1716,14 @@ def test_api_ws_subscribe(botclient, mocker): with client.websocket_connect(ws_url) as ws: ws.send_json({'type': 'subscribe', 'data': ['whitelist']}) + time.sleep(1) # Check call count is now 1 as we sent a valid subscribe request assert sub_mock.call_count == 1 with client.websocket_connect(ws_url) as ws: ws.send_json({'type': 'subscribe', 'data': 'whitelist'}) + time.sleep(1) # Call count hasn't changed as the subscribe request was invalid assert sub_mock.call_count == 1 @@ -1773,24 +1777,18 @@ def test_api_ws_send_msg(default_conf, mocker, caplog): mocker.patch('freqtrade.rpc.api_server.ApiServer.start_api') apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(RPC(get_patched_freqtradebot(mocker, default_conf))) - apiserver.start_message_queue() - # Give the queue thread time to start - time.sleep(0.2) - # Test message_queue coro receives the message - test_message = {"type": "status", "data": "test"} - apiserver.send_msg(test_message) - time.sleep(0.1) # Not sure how else to wait for the coro to receive the data - assert log_has("Found message of type: status", caplog) + # Start test client context manager to run lifespan events + with TestClient(apiserver.app): + # Test message is published on the Message Stream + test_message = {"type": "status", "data": "test"} + first_waiter = apiserver._message_stream._waiter + apiserver.send_msg(test_message) + assert first_waiter.result()[0] == test_message - # Test if exception logged when error occurs in sending - mocker.patch('freqtrade.rpc.api_server.ws.channel.ChannelManager.broadcast', - side_effect=Exception) - - apiserver.send_msg(test_message) - time.sleep(0.1) # Not sure how else to wait for the coro to receive the data - assert log_has_re(r"Exception happened in background task.*", caplog) + second_waiter = apiserver._message_stream._waiter + apiserver.send_msg(test_message) + assert first_waiter != second_waiter finally: - apiserver.cleanup() ApiServer.shutdown() From c01f25ddc95f6dbdf91b3dddd52cda4bcbf57428 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Tue, 22 Nov 2022 13:09:09 -0300 Subject: [PATCH 170/421] update code to freqai_backtest_live_models only from historic predictions --- docs/freqai-parameter-table.md | 1 - docs/freqai-running.md | 12 +- freqtrade/freqai/data_drawer.py | 3 +- freqtrade/freqai/data_kitchen.py | 144 ++++-------------------- freqtrade/freqai/freqai_interface.py | 19 +--- freqtrade/freqai/utils.py | 9 +- tests/freqai/test_freqai_backtesting.py | 2 +- tests/freqai/test_freqai_datadrawer.py | 6 +- 
tests/freqai/test_freqai_datakitchen.py | 67 ----------- 9 files changed, 36 insertions(+), 227 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 059d56a1f..c027a12b1 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -15,7 +15,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old.
**Datatype:** Positive integer. <br> Default: `0` (models never expire).
| `purge_old_models` | Delete obsolete models. <br> **Datatype:** Boolean. <br> Default: `False` (all historic models remain on disk).
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
-| `backtest_using_historic_predictions` | Reuse `historic_predictions` in backtesting with [Backtest live models](freqai-running.md#backtest_live_models) option. <br> Default: `True`
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
| `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. <br> Default: `False`.
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br>
Default: `False`. diff --git a/docs/freqai-running.md b/docs/freqai-running.md index d777b180e..23873547f 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -81,17 +81,9 @@ To save the models generated during a particular backtest so that you can start ### Backtest live models -FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. For that, you have 2 options: +FreqAI allow you to reuse live historic predictions through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. -1. Set `"backtest_using_historic_predictions"` to `True` in the config. With this option, FreqAI will reuse `historic_predictions` in backtesting. This option requires less disk space and backtesting will run faster. -2. Set `"purge_old_models"` to `False` and `"backtest_using_historic_predictions"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time. - -The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models. - -Each model has an identifier derived from the training end date. If you have only 1 model trained, FreqAI will backtest from the training end date until the current date. If you have more than 1 model, each model will perform the backtesting according to the training end date until the training end date of the next model and so on. For the last model, the period of the previous model will be used for the execution. - -!!! Note - Currently, there is no checking for expired models, even if the `expired_hours` parameter is set. +The `--timerange` parameter must not be informed, as it will be automatically calculated through the data in historic predictions file. ### Downloading data to cover the full backtest period diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 59b8e2684..9f1e27796 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -715,7 +715,7 @@ class FreqaiDataDrawer: return corr_dataframes, base_dataframes - def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: + def get_timerange_from_live_historic_predictions(self) -> TimeRange: """ Returns timerange information based on historic predictions file :return: timerange calculated from saved live data @@ -724,7 +724,6 @@ class FreqaiDataDrawer: raise OperationalException( 'Historic predictions not found. 
Historic predictions data is required ' 'to run backtest with the freqai-backtest-live-models option ' - 'and backtest_using_historic_predictions config option as true' ) self.load_historic_predictions_from_disk() diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index b364f4e7e..f75fd3dd8 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1,7 +1,7 @@ import copy import logging import shutil -from datetime import datetime, timedelta, timezone +from datetime import datetime, timezone from math import cos, sin from pathlib import Path from typing import Any, Dict, List, Tuple @@ -86,14 +86,7 @@ class FreqaiDataKitchen: if not self.live: self.full_path = self.get_full_models_path(self.config) - if self.backtest_live_models: - if self.pair and not ( - self.freqai_config.get("backtest_using_historic_predictions", True) - ): - self.set_timerange_from_ready_models() - (self.training_timeranges, - self.backtesting_timeranges) = self.split_timerange_live_models() - else: + if not self.backtest_live_models: self.full_timerange = self.create_fulltimerange( self.config["timerange"], self.freqai_config.get("train_period_days", 0) ) @@ -458,28 +451,28 @@ class FreqaiDataKitchen: # print(tr_training_list, tr_backtesting_list) return tr_training_list_timerange, tr_backtesting_list_timerange - def split_timerange_live_models( - self - ) -> Tuple[list, list]: + # def split_timerange_live_models( + # self + # ) -> Tuple[list, list]: - tr_backtesting_list_timerange = [] - asset = self.pair.split("/")[0] - if asset not in self.backtest_live_models_data["assets_end_dates"]: - raise OperationalException( - f"Model not available for pair {self.pair}. " - "Please, try again after removing this pair from the configuration file." - ) - asset_data = self.backtest_live_models_data["assets_end_dates"][asset] - backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"] - model_end_dates = [x for x in asset_data] - model_end_dates.append(backtesting_timerange.stopts) - model_end_dates.sort() - for index, item in enumerate(model_end_dates): - if len(model_end_dates) > (index + 1): - tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1]) - tr_backtesting_list_timerange.append(tr_to_add) + # tr_backtesting_list_timerange = [] + # asset = self.pair.split("/")[0] + # if asset not in self.backtest_live_models_data["assets_end_dates"]: + # raise OperationalException( + # f"Model not available for pair {self.pair}. " + # "Please, try again after removing this pair from the configuration file." 
+ # ) + # asset_data = self.backtest_live_models_data["assets_end_dates"][asset] + # backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"] + # model_end_dates = [x for x in asset_data] + # model_end_dates.append(backtesting_timerange.stopts) + # model_end_dates.sort() + # for index, item in enumerate(model_end_dates): + # if len(model_end_dates) > (index + 1): + # tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1]) + # tr_backtesting_list_timerange.append(tr_to_add) - return tr_backtesting_list_timerange, tr_backtesting_list_timerange + # return tr_backtesting_list_timerange, tr_backtesting_list_timerange def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame: """ @@ -1371,17 +1364,6 @@ class FreqaiDataKitchen: ) return False - def set_timerange_from_ready_models(self): - backtesting_timerange, \ - assets_end_dates = ( - self.get_timerange_and_assets_end_dates_from_ready_models(self.full_path)) - - self.backtest_live_models_data = { - "backtesting_timerange": backtesting_timerange, - "assets_end_dates": assets_end_dates - } - return - def get_full_models_path(self, config: Config) -> Path: """ Returns default FreqAI model path @@ -1392,88 +1374,6 @@ class FreqaiDataKitchen: config["user_data_dir"] / "models" / str(freqai_config.get("identifier")) ) - def get_timerange_and_assets_end_dates_from_ready_models( - self, models_path: Path) -> Tuple[TimeRange, Dict[str, Any]]: - """ - Returns timerange information based on a FreqAI model directory - :param models_path: FreqAI model path - - :return: a Tuple with (Timerange calculated from directory and - a Dict with pair and model end training dates info) - """ - all_models_end_dates = [] - assets_end_dates: Dict[str, Any] = self.get_assets_timestamps_training_from_ready_models( - models_path) - for key in assets_end_dates: - for model_end_date in assets_end_dates[key]: - if model_end_date not in all_models_end_dates: - all_models_end_dates.append(model_end_date) - - if len(all_models_end_dates) == 0: - raise OperationalException( - 'At least 1 saved model is required to ' - 'run backtest with the freqai-backtest-live-models option' - ) - - if len(all_models_end_dates) == 1: - logger.warning( - "Only 1 model was found. 
Backtesting will run with the " - "timerange from the end of the training date to the current date" - ) - - finish_timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - if len(all_models_end_dates) > 1: - # After last model end date, use the same period from previous model - # to finish the backtest - all_models_end_dates.sort(reverse=True) - finish_timestamp = all_models_end_dates[0] + \ - (all_models_end_dates[0] - all_models_end_dates[1]) - - all_models_end_dates.append(finish_timestamp) - all_models_end_dates.sort() - start_date = (datetime(*datetime.fromtimestamp(min(all_models_end_dates), - timezone.utc).timetuple()[:3], tzinfo=timezone.utc)) - end_date = (datetime(*datetime.fromtimestamp(max(all_models_end_dates), - timezone.utc).timetuple()[:3], tzinfo=timezone.utc)) - - # add 1 day to string timerange to ensure BT module will load all dataframe data - end_date = end_date + timedelta(days=1) - backtesting_timerange = TimeRange( - 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) - ) - return backtesting_timerange, assets_end_dates - - def get_assets_timestamps_training_from_ready_models( - self, models_path: Path) -> Dict[str, Any]: - """ - Scan the models path and returns all assets end training dates (timestamp) - :param models_path: FreqAI model path - - :return: a Dict with asset and model end training dates info - """ - assets_end_dates: Dict[str, Any] = {} - if not models_path.is_dir(): - raise OperationalException( - 'Model folders not found. Saved models are required ' - 'to run backtest with the freqai-backtest-live-models option' - ) - for model_dir in models_path.iterdir(): - if str(model_dir.name).startswith("sub-train"): - model_end_date = int(model_dir.name.split("_")[1]) - asset = model_dir.name.split("_")[0].replace("sub-train-", "") - model_file_name = ( - f"cb_{str(model_dir.name).replace('sub-train-', '').lower()}" - "_model.joblib" - ) - - model_path_file = Path(model_dir / model_file_name) - if model_path_file.is_file(): - if asset not in assets_end_dates: - assets_end_dates[asset] = [] - assets_end_dates[asset].append(model_end_date) - - return assets_end_dates - def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame: """ Remove all special characters from feature strings (:) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 473fe939f..80348fda8 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -68,10 +68,6 @@ class IFreqaiModel(ABC): self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True) if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') - self.backtest_using_historic_predictions: bool = self.freqai_info.get( - "backtest_using_historic_predictions", True) - if self.backtest_using_historic_predictions: - logger.info('Backtesting live models configured to use historic predictions.') self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) # set current candle to arbitrary historical date @@ -148,23 +144,18 @@ class IFreqaiModel(ABC): elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) if self.dk.backtest_live_models: - if self.backtest_using_historic_predictions: - logger.info( - "Backtesting using historic predictions (live models)") - else: - logger.info( - f"Backtesting {len(self.dk.backtesting_timeranges)} " - "timeranges (live models)") + logger.info( + 
"Backtesting using historic predictions (live models)") else: logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dataframe = self.dk.use_strategy_to_populate_indicators( strategy, prediction_dataframe=dataframe, pair=metadata["pair"] ) - if not self.backtest_using_historic_predictions: + if not self.config.get("freqai_backtest_live_models", False): dk = self.start_backtesting(dataframe, metadata, self.dk) dataframe = dk.remove_features_from_df(dk.return_dataframe) else: - dk = self.start_backtesting_from_live_saved_files( + dk = self.start_backtesting_from_historic_predictions( dataframe, metadata, self.dk) dataframe = dk.return_dataframe @@ -330,7 +321,7 @@ class IFreqaiModel(ABC): return dk - def start_backtesting_from_live_saved_files( + def start_backtesting_from_historic_predictions( self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen ) -> FreqaiDataKitchen: """ diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index fd5d448bd..806e3ca15 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -230,11 +230,6 @@ def get_timerange_backtest_live_models(config: Config) -> str: """ dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) - timerange: TimeRange = TimeRange() - if not config.get("freqai", {}).get("backtest_using_historic_predictions", True): - timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) - else: - dd = FreqaiDataDrawer(models_path, config) - timerange = dd.get_timerange_from_backtesting_live_dataframe() - + dd = FreqaiDataDrawer(models_path, config) + timerange = dd.get_timerange_from_live_historic_predictions() return timerange.timerange_str diff --git a/tests/freqai/test_freqai_backtesting.py b/tests/freqai/test_freqai_backtesting.py index 49b27f724..60963e762 100644 --- a/tests/freqai/test_freqai_backtesting.py +++ b/tests/freqai/test_freqai_backtesting.py @@ -81,7 +81,7 @@ def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testda bt_config = setup_optimize_configuration(args, RunMode.BACKTEST) with pytest.raises(OperationalException, - match=r".* Saved models are required to run backtest .*"): + match=r".* Historic predictions data is required to run backtest .*"): Backtesting(bt_config) Backtesting.cleanup() diff --git a/tests/freqai/test_freqai_datadrawer.py b/tests/freqai/test_freqai_datadrawer.py index 3abf84586..da3b8f9c1 100644 --- a/tests/freqai/test_freqai_datadrawer.py +++ b/tests/freqai/test_freqai_datadrawer.py @@ -98,7 +98,7 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf): shutil.rmtree(Path(freqai.dk.full_path)) -def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): +def test_get_timerange_from_live_historic_predictions(mocker, freqai_conf): strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -115,7 +115,7 @@ def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): freqai.dd.save_historic_predictions_to_disk() freqai.dd.save_global_metadata_to_disk({"start_dry_live_date": 1516406400}) - timerange = freqai.dd.get_timerange_from_backtesting_live_dataframe() + timerange = freqai.dd.get_timerange_from_live_historic_predictions() assert timerange.startts == 1516406400 assert timerange.stopts == 1517356500 @@ -129,4 +129,4 @@ def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_co OperationalException, 
match=r'Historic predictions not found.*' ): - freqai.dd.get_timerange_from_backtesting_live_dataframe() + freqai.dd.get_timerange_from_live_historic_predictions() diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 4dfc75d38..0dc897916 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -9,7 +9,6 @@ from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.utils import get_timerange_backtest_live_models from tests.conftest import get_patched_exchange, log_has_re from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy, make_data_dictionary, make_unfiltered_dataframe) @@ -166,72 +165,6 @@ def test_make_train_test_datasets(mocker, freqai_conf): assert len(data_dictionary['train_features'].index) == 1916 -def test_get_pairs_timestamp_validation(mocker, freqai_conf): - exchange = get_patched_exchange(mocker, freqai_conf) - strategy = get_patched_freqai_strategy(mocker, freqai_conf) - strategy.dp = DataProvider(freqai_conf, exchange) - strategy.freqai_info = freqai_conf.get("freqai", {}) - freqai = strategy.freqai - freqai.live = True - freqai.dk = FreqaiDataKitchen(freqai_conf) - freqai_conf['freqai'].update({"identifier": "invalid_id"}) - model_path = freqai.dk.get_full_models_path(freqai_conf) - with pytest.raises( - OperationalException, - match=r'.*required to run backtest with the freqai-backtest-live-models.*' - ): - freqai.dk.get_assets_timestamps_training_from_ready_models(model_path) - - -@pytest.mark.parametrize('model', [ - 'LightGBMRegressor' - ]) -def test_get_timerange_from_ready_models(mocker, freqai_conf, model): - freqai_conf.update({"freqaimodel": model}) - freqai_conf.update({"timerange": "20180110-20180130"}) - freqai_conf.update({"strategy": "freqai_test_strat"}) - freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False}) - - strategy = get_patched_freqai_strategy(mocker, freqai_conf) - exchange = get_patched_exchange(mocker, freqai_conf) - strategy.dp = DataProvider(freqai_conf, exchange) - strategy.freqai_info = freqai_conf.get("freqai", {}) - freqai = strategy.freqai - freqai.live = True - freqai.dk = FreqaiDataKitchen(freqai_conf) - timerange = TimeRange.parse_timerange("20180101-20180130") - freqai.dd.load_all_pair_histories(timerange, freqai.dk) - - freqai.dd.pair_dict = MagicMock() - - data_load_timerange = TimeRange.parse_timerange("20180101-20180130") - - # 1516233600 (2018-01-18 00:00) - Start Training 1 - # 1516406400 (2018-01-20 00:00) - End Training 1 (Backtest slice 1) - # 1516579200 (2018-01-22 00:00) - End Training 2 (Backtest slice 2) - # 1516838400 (2018-01-25 00:00) - End Timerange - - new_timerange = TimeRange("date", "date", 1516233600, 1516406400) - freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) - - new_timerange = TimeRange("date", "date", 1516406400, 1516579200) - freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) - - model_path = freqai.dk.get_full_models_path(freqai_conf) - (backtesting_timerange, - pairs_end_dates) = freqai.dk.get_timerange_and_assets_end_dates_from_ready_models( - models_path=model_path) - - assert len(pairs_end_dates["ADA"]) == 2 - assert backtesting_timerange.startts == 1516406400 - 
assert backtesting_timerange.stopts == 1516838400 - - backtesting_string_timerange = get_timerange_backtest_live_models(freqai_conf) - assert backtesting_string_timerange == '20180120-20180125' - - @pytest.mark.parametrize('model', [ 'LightGBMRegressor' ]) From a5442772fc22138dc18fcd3c99c2727f1e9007dd Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 22 Nov 2022 09:42:09 -0700 Subject: [PATCH 171/421] ensure only broadcasting to subscribed topics --- freqtrade/rpc/api_server/api_ws.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 618490ec8..fe2968c05 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -34,7 +34,8 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message Iterate over messages in the message stream and send them """ async for message in message_stream: - await channel.send(message, timeout=True) + if channel.subscribed_to(message.get('type')): + await channel.send(message, timeout=True) async def _process_consumer_request( From d09157efb89a947e24451babd5b1ff11f3fa58e0 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Tue, 22 Nov 2022 15:15:42 -0300 Subject: [PATCH 172/421] update code to use one prediction file / pair --- freqtrade/freqai/data_kitchen.py | 39 ++++++++++++++++++--------- freqtrade/freqai/freqai_interface.py | 1 + tests/freqai/test_freqai_interface.py | 14 ++++++++-- 3 files changed, 39 insertions(+), 15 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index f75fd3dd8..65f3483af 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -from pandas import DataFrame +from pandas import DataFrame, HDFStore from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -74,6 +74,7 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() + self.backtesting_h5_data: HDFStore = {} self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -1319,7 +1320,7 @@ class FreqaiDataKitchen: if not full_predictions_folder.is_dir(): full_predictions_folder.mkdir(parents=True, exist_ok=True) - append_df.to_hdf(self.backtesting_results_path, key='append_df', mode='w') + append_df.to_hdf(self.backtesting_results_path, key=self.model_filename) def get_backtesting_prediction( self @@ -1327,9 +1328,26 @@ class FreqaiDataKitchen: """ Get prediction dataframe from h5 file format """ - append_df = pd.read_hdf(self.backtesting_results_path) + append_df = self.backtesting_h5_data[self.model_filename] return append_df + def load_prediction_pair_file( + self + ) -> None: + """ + Load prediction file if it exists + """ + pair_file_name = self.pair.split(':')[0].replace('/', '_').lower() + path_to_predictionfile = Path(self.full_path / + self.backtest_predictions_folder / + f"{pair_file_name}_prediction.h5") + self.backtesting_results_path = path_to_predictionfile + file_exists = path_to_predictionfile.is_file() + if file_exists: + self.backtesting_h5_data = pd.HDFStore(path_to_predictionfile) + else: + self.backtesting_h5_data = {} + def check_if_backtest_prediction_is_valid( self, len_backtest_df: int @@ -1341,17 +1359,11 @@ class FreqaiDataKitchen: :return: :boolean: whether 
the prediction file is valid. """ - path_to_predictionfile = Path(self.full_path / - self.backtest_predictions_folder / - f"{self.model_filename}_prediction.h5") - self.backtesting_results_path = path_to_predictionfile - - file_exists = path_to_predictionfile.is_file() - - if file_exists: + if self.model_filename in self.backtesting_h5_data: append_df = self.get_backtesting_prediction() if len(append_df) == len_backtest_df and 'date' in append_df: - logger.info(f"Found backtesting prediction file at {path_to_predictionfile}") + logger.info("Found backtesting prediction file " + f"at {self.backtesting_results_path.name}") return True else: logger.info("A new backtesting prediction file is required. " @@ -1360,7 +1372,8 @@ class FreqaiDataKitchen: return False else: logger.info( - f"Could not find backtesting prediction file at {path_to_predictionfile}" + "Could not find backtesting prediction file " + f"at {self.backtesting_results_path.name}" ) return False diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 80348fda8..21851b3b6 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -260,6 +260,7 @@ class IFreqaiModel(ABC): self.pair_it += 1 train_it = 0 + dk.load_prediction_pair_file() # Loop enforcing the sliding window training/backtesting paradigm # tr_train is the training time range e.g. 1 historical month # tr_backtest is the backtesting time range e.g. the week directly diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 66b3bac17..6e2e774fe 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -263,7 +263,9 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") - metadata = {"pair": "ADA/BTC"} + pair = "ADA/BTC" + metadata = {"pair": pair} + freqai.dk.pair = pair freqai.start_backtesting(df, metadata, freqai.dk) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] @@ -286,6 +288,9 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") + pair = "ADA/BTC" + metadata = {"pair": pair} + freqai.dk.pair = pair freqai.start_backtesting(df, metadata, freqai.dk) assert log_has_re( @@ -293,9 +298,14 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): caplog, ) + pair = "ETH/BTC" + metadata = {"pair": pair} + freqai.dk.pair = pair + freqai.start_backtesting(df, metadata, freqai.dk) + path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder) prediction_files = [x for x in path.iterdir() if x.is_file()] - assert len(prediction_files) == 5 + assert len(prediction_files) == 2 shutil.rmtree(Path(freqai.dk.full_path)) From 48242ca02b0f819d0d0318e89ad2b1804017b076 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 22 Nov 2022 12:43:45 -0700 Subject: [PATCH 173/421] update catch block in cancel channel tasks --- freqtrade/rpc/api_server/ws/channel.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 9dea21f3b..ad183ce5b 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -99,7 +99,7 @@ class WebSocketChannel: self._calc_send_limit() except asyncio.TimeoutError: - 
logger.info(f"Connection for {self} is too far behind, disconnecting") + logger.info(f"Connection for {self} timed out, disconnecting") raise # Without this sleep, messages would send to one channel @@ -138,7 +138,7 @@ class WebSocketChannel: try: await self._websocket.close() - except Exception: + except RuntimeError: pass def is_closed(self) -> bool: @@ -196,8 +196,10 @@ class WebSocketChannel: await task except ( asyncio.CancelledError, + asyncio.TimeoutError, WebSocketDisconnect, - ConnectionClosed + ConnectionClosed, + RuntimeError ): pass except Exception as e: From 3d26659d5ef520a6320532e767608cbdfbc1563c Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 23 Nov 2022 20:09:55 +0100 Subject: [PATCH 174/421] Fix some doc typos --- docs/freqai-reinforcement-learning.md | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index bd2b36463..45f29c6ea 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,16 +1,15 @@ # Reinforcement Learning -!!! Note - Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. - +!!! Note "Installation size" + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. ## Background and terminology ### What is RL and why does FreqAI need it? -Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. +Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. -A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). 
*FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* +A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.* Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors. @@ -18,16 +17,14 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward). -We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. - -The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. +We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. +The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. ### Important considerations As explained above, the agent is "trained" in an artificial trading "environment". 
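(Aside, for orientation: once trained, the agent's chosen action reaches the user strategy as the `&-action` column. A minimal, hypothetical sketch of consuming it is shown below and revisited in the running section further on — the mapping of 1 = enter long / 3 = enter short is an assumption based on the 5-action environment, and the column names follow the example strategy created earlier in this series.)

```python
from functools import reduce

from pandas import DataFrame


# Hypothetical fragment of an IStrategy subclass (not a complete strategy).
def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
    # only act where freqai allowed a prediction and the agent chose "enter long"
    enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1]
    df.loc[
        reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
    ] = (1, "long")

    # same pattern for the assumed "enter short" action
    enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3]
    df.loc[
        reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
    ] = (1, "short")

    return df
```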
In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. - ## Running Reinforcement Learning Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: @@ -87,7 +84,7 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from return df ``` -Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environent: +Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment: ```python # The following features are necessary for RL models @@ -154,7 +151,7 @@ In order to configure the `Reinforcement Learner` the following dictionary must } ``` -Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). +Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). !!! Note Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html). @@ -220,15 +217,14 @@ where `unique-id` is the `identifier` set in the `freqai` configuration file. Th ![tensorboard](assets/tensorboard.jpg) - ### Choosing a base environment -FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: +FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. 
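To make the customisation concrete before comparing the two, here is a minimal, hypothetical sketch of a prediction model that overrides `calculate_reward()` on top of the 5-action environment. The class layout mirrors the `ReinforcementLearner_test_4ac` test model later in this series; the helper attributes used inside the environment (`_is_valid()`, `get_unrealized_profit()`, `_position`) are assumed from the base environment, and the specific reward values are placeholder assumptions rather than the shipped defaults.

```python
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyCoolRLModel(ReinforcementLearner):
    """
    Hypothetical user model: only the environment (and therefore the reward)
    is customised; training and prediction are inherited unchanged.
    """

    class MyRLEnv(Base5ActionRLEnv):

        def calculate_reward(self, action: int) -> float:
            # penalise the agent for attempting an action that is not valid
            # in its current position
            if not self._is_valid(action):
                return -2
            pnl = self.get_unrealized_profit()
            # small penalty for sitting idle with no open position
            if action == Actions.Neutral.value and self._position == Positions.Neutral:
                return -1
            # reward closing a long proportionally to the unrealized profit
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                return float(pnl * 2)
            # reward closing a short the same way
            if action == Actions.Short_exit.value and self._position == Positions.Short:
                return float(pnl * 2)
            return 0.
```

Returning `-2` for an invalid action and scaling the exit reward by unrealized profit are purely illustrative choices — as noted above, a poorly shaped reward can be gamed by the agent without producing winning trades.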
As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: * the actions available in the `calculate_reward` * the actions consumed by the user strategy -Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. +Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. !!! Note FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). From e5fc21f577285f0f168467b4b5ea27765d656313 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 23 Nov 2022 20:59:45 +0100 Subject: [PATCH 175/421] Fix broken table rendering --- docs/freqai-parameter-table.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index be306fd71..9e16aec8f 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -48,7 +48,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `n_estimators` | The number of boosted trees to fit in the training of the model.
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during training of the model.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. -| | *Reinforcement Learning Parameters** +| | **Reinforcement Learning Parameters** | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
| `train_cycles` | Training time steps will be set based on `train_cycles * number of training data points`.
**Datatype:** Integer. | `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. From 8f1a8c752bdf3dc91f415b4d27931e87a0e6611d Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 24 Nov 2022 07:00:12 +0100 Subject: [PATCH 176/421] Add freqairl docker build process --- build_helpers/publish_docker_arm64.sh | 8 ++++++++ build_helpers/publish_docker_multi.sh | 4 ++++ docker/Dockerfile.freqai_rl | 8 ++++++++ docs/freqai-reinforcement-learning.md | 3 ++- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 ++--- 5 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 docker/Dockerfile.freqai_rl diff --git a/build_helpers/publish_docker_arm64.sh b/build_helpers/publish_docker_arm64.sh index 4c66f4483..071eb0fa2 100755 --- a/build_helpers/publish_docker_arm64.sh +++ b/build_helpers/publish_docker_arm64.sh @@ -7,11 +7,13 @@ export DOCKER_BUILDKIT=1 TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g") TAG_PLOT=${TAG}_plot TAG_FREQAI=${TAG}_freqai +TAG_FREQAI_RL=${TAG_FREQAI}rl TAG_PI="${TAG}_pi" TAG_ARM=${TAG}_arm TAG_PLOT_ARM=${TAG_PLOT}_arm TAG_FREQAI_ARM=${TAG_FREQAI}_arm +TAG_FREQAI_RL_ARM=${TAG_FREQAI_RL}_arm CACHE_IMAGE=freqtradeorg/freqtrade_cache echo "Running for ${TAG}" @@ -41,9 +43,11 @@ docker tag freqtrade:$TAG_ARM ${CACHE_IMAGE}:$TAG_ARM docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot . docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai . +docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_RL_ARM} -f docker/Dockerfile.freqai_rl . docker tag freqtrade:$TAG_PLOT_ARM ${CACHE_IMAGE}:$TAG_PLOT_ARM docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM +docker tag freqtrade:$TAG_FREQAI_RL_ARM ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM # Run backtest docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3 @@ -58,6 +62,7 @@ docker images # docker push ${IMAGE_NAME} docker push ${CACHE_IMAGE}:$TAG_PLOT_ARM docker push ${CACHE_IMAGE}:$TAG_FREQAI_ARM +docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM docker push ${CACHE_IMAGE}:$TAG_ARM # Create multi-arch image @@ -74,6 +79,9 @@ docker manifest push -p ${IMAGE_NAME}:${TAG_PLOT} docker manifest create ${IMAGE_NAME}:${TAG_FREQAI} ${CACHE_IMAGE}:${TAG_FREQAI_ARM} ${CACHE_IMAGE}:${TAG_FREQAI} docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI} +docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM} ${CACHE_IMAGE}:${TAG_FREQAI_RL} +docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_RL} + # Tag as latest for develop builds if [ "${TAG}" = "develop" ]; then docker manifest create ${IMAGE_NAME}:latest ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG} diff --git a/build_helpers/publish_docker_multi.sh b/build_helpers/publish_docker_multi.sh index c13732003..a608c1282 100755 --- a/build_helpers/publish_docker_multi.sh +++ b/build_helpers/publish_docker_multi.sh @@ -6,6 +6,7 @@ TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g") TAG_PLOT=${TAG}_plot TAG_FREQAI=${TAG}_freqai +TAG_FREQAI_RL=${TAG_FREQAI}rl TAG_PI="${TAG}_pi" PI_PLATFORM="linux/arm/v7" @@ -51,9 +52,11 @@ docker tag freqtrade:$TAG ${CACHE_IMAGE}:$TAG docker build 
--cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot . docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai . +docker build --cache-from freqtrade:${TAG_FREQAI} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_FREQAI} -t freqtrade:${TAG_FREQAI_RL} -f docker/Dockerfile.freqai_rl . docker tag freqtrade:$TAG_PLOT ${CACHE_IMAGE}:$TAG_PLOT docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI +docker tag freqtrade:$TAG_FREQAI_RL ${CACHE_IMAGE}:$TAG_FREQAI_RL # Run backtest docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3 @@ -68,6 +71,7 @@ docker images docker push ${CACHE_IMAGE} docker push ${CACHE_IMAGE}:$TAG_PLOT docker push ${CACHE_IMAGE}:$TAG_FREQAI +docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL docker push ${CACHE_IMAGE}:$TAG diff --git a/docker/Dockerfile.freqai_rl b/docker/Dockerfile.freqai_rl new file mode 100644 index 000000000..18fb9afa2 --- /dev/null +++ b/docker/Dockerfile.freqai_rl @@ -0,0 +1,8 @@ +ARG sourceimage=freqtradeorg/freqtrade +ARG sourcetag=develop_freqai +FROM ${sourceimage}:${sourcetag} + +# Install dependencies +COPY requirements-freqai.txt requirements-freqai-rl.txt /freqtrade/ + +RUN pip install -r requirements-freqai-rl.txt --user --no-cache-dir diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 45f29c6ea..0e4388cf1 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,7 +1,8 @@ # Reinforcement Learning !!! Note "Installation size" - Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". + Users who prefer docker should ensure they use the docker image appended with `_freqairl`. ## Background and terminology diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 629633814..16cab4c7d 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -133,8 +133,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): :param data_dictionary: dict = common data dictionary containing train and test features/labels/weights. :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in the - environment during training - or testing + environment during training or testing :param dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] @@ -201,7 +200,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: """ Filter the prediction features data and predict with it. 
- :param: unfiltered_dataframe: Full dataframe for the current backtest period. + :param unfiltered_dataframe: Full dataframe for the current backtest period. :return: :pred_df: dataframe containing the predictions :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove From 44b042ba51aa8827dabd07fe296d3a893c71a421 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 17:53:26 +0100 Subject: [PATCH 177/421] remove unused function --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 0d101ee9c..0d7672b2f 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -18,12 +18,6 @@ class Actions(Enum): Short_exit = 4 -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - - class Base5ActionRLEnv(BaseEnvironment): """ Base class for a 5 action environment From 8855e36f577ba6d2769da545b97709bfc8ef95e2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 18:16:33 +0100 Subject: [PATCH 178/421] reduce freqai testing time by reducing retrain frequency and number of features --- tests/freqai/conftest.py | 3 +- tests/freqai/test_freqai_interface.py | 18 ++++---- .../ReinforcementLearner_test_4ac.py | 42 +------------------ tests/strategy/strats/freqai_rl_test_strat.py | 19 --------- 4 files changed, 12 insertions(+), 70 deletions(-) diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 00efad3a7..bee7df27e 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -27,10 +27,9 @@ def freqai_conf(default_conf, tmpdir): "timerange": "20180110-20180115", "freqai": { "enabled": True, - "startup_candles": 10000, "purge_old_models": True, "train_period_days": 2, - "backtest_period_days": 2, + "backtest_period_days": 10, "live_retrain_hours": 0, "expiration_hours": 1, "identifier": "uniqe-id100", diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index b379d05d7..335cce519 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -192,13 +192,13 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): @pytest.mark.parametrize( "model, num_files, strat", [ - ("LightGBMRegressor", 6, "freqai_test_strat"), - ("XGBoostRegressor", 6, "freqai_test_strat"), - ("CatboostRegressor", 6, "freqai_test_strat"), - ("ReinforcementLearner", 7, "freqai_rl_test_strat"), - ("XGBoostClassifier", 6, "freqai_test_classifier"), - ("LightGBMClassifier", 6, "freqai_test_classifier"), - ("CatboostClassifier", 6, "freqai_test_classifier") + ("LightGBMRegressor", 2, "freqai_test_strat"), + ("XGBoostRegressor", 2, "freqai_test_strat"), + ("CatboostRegressor", 2, "freqai_test_strat"), + ("ReinforcementLearner", 3, "freqai_rl_test_strat"), + ("XGBoostClassifier", 2, "freqai_test_classifier"), + ("LightGBMClassifier", 2, "freqai_test_classifier"), + ("CatboostClassifier", 2, "freqai_test_classifier") ], ) def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog): @@ -305,7 +305,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): freqai.start_backtesting(df, metadata, freqai.dk) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] - assert len(model_folders) == 6 + assert len(model_folders) == 2 # without deleting the existing folder structure, re-run @@ -333,7 
+333,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder) prediction_files = [x for x in path.iterdir() if x.is_file()] - assert len(prediction_files) == 5 + assert len(prediction_files) == 1 shutil.rmtree(Path(freqai.dk.full_path)) diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 13e5af02f..1f40d86d1 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -1,57 +1,19 @@ import logging -from pathlib import Path -from typing import Any, Dict import numpy as np -import torch as th -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner logger = logging.getLogger(__name__) -class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel): +class ReinforcementLearner_test_4ac(ReinforcementLearner): """ User created Reinforcement Learning Model prediction model. """ - def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): - - train_df = data_dictionary["train_features"] - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[64, 64]) - - if dk.pair not in self.dd.model_dictionary or not self.continual_learning: - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path( - dk.full_path / "tensorboard" / dk.pair.split('/')[0]), - **self.freqai_info['model_training_parameters'] - ) - else: - logger.info('Continual training activated - starting training from previously ' - 'trained agent.') - model = self.dd.model_dictionary[dk.pair] - model.set_env(self.train_env) - - model.learn( - total_timesteps=int(total_timesteps), - callback=self.eval_callback - ) - - if Path(dk.data_path / "best_model.zip").is_file(): - logger.info('Callback found a best model.') - best_model = self.MODELCLASS.load(dk.data_path / "best_model") - return best_model - - logger.info('Couldnt find best model, using final model instead.') - - return model - class MyRLEnv(Base4ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. 
Here the user diff --git a/tests/strategy/strats/freqai_rl_test_strat.py b/tests/strategy/strats/freqai_rl_test_strat.py index 8d507a6da..f32a4adca 100644 --- a/tests/strategy/strats/freqai_rl_test_strat.py +++ b/tests/strategy/strats/freqai_rl_test_strat.py @@ -19,19 +19,6 @@ class freqai_rl_test_strat(IStrategy): minimal_roi = {"0": 0.1, "240": -1} - plot_config = { - "main_plot": {}, - "subplots": { - "prediction": {"prediction": {"color": "blue"}}, - "target_roi": { - "target_roi": {"color": "brown"}, - }, - "do_predict": { - "do_predict": {"color": "brown"}, - }, - }, - } - process_only_new_candles = True stoploss = -0.05 use_exit_signal = True @@ -50,10 +37,7 @@ class freqai_rl_test_strat(IStrategy): t = int(t) informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) - # FIXME: add these outside the user strategy? # The following columns are necessary for RL models. informative[f"%-{pair}raw_close"] = informative["close"] informative[f"%-{pair}raw_open"] = informative["open"] @@ -79,9 +63,6 @@ class freqai_rl_test_strat(IStrategy): # function to populate indicators during training). Notice how we ensure not to # add them multiple times if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - # For RL, there are no direct targets to set. This is filler (neutral) # until the agent sends an action. df["&-action"] = 0 From 3a07749fcc47570259649c1107bec0e2a0bab407 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 18:46:54 +0100 Subject: [PATCH 179/421] fix docstring --- .../freqai/RL/BaseReinforcementLearningModel.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 16cab4c7d..bddac23b3 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -166,14 +166,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): def get_state_info(self, pair: str) -> Tuple[float, float, int]: """ - State info during dry/live/backtesting which is fed back + State info during dry/live (not backtesting) which is fed back into the model. 
- :param: - pair: str = COIN/STAKE to get the environment information for - :returns: - market_side: float = representing short, long, or neutral for + :param pair: str = COIN/STAKE to get the environment information for + :return: + :market_side: float = representing short, long, or neutral for pair - trade_duration: int = the number of candles that the trade has + :current_profit: float = unrealized profit of the current trade + :trade_duration: int = the number of candles that the trade has been open for """ open_trades = Trade.get_trades_proxy(is_open=True) From 00d2a01bf077c0ae140773091553cc74ec0092aa Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 18:57:01 +0100 Subject: [PATCH 180/421] isort --- tests/freqai/test_models/ReinforcementLearner_test_4ac.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 1f40d86d1..9861acfd8 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -2,8 +2,8 @@ import logging import numpy as np -from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner +from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions logger = logging.getLogger(__name__) From 73c458d47b31f59e8a2f841ed650272b53756553 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 19:04:35 +0100 Subject: [PATCH 181/421] use importlib instead of __import___ --- freqtrade/freqai/data_drawer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 96b481074..3b9352efe 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -1,4 +1,5 @@ import collections +import importlib import logging import re import shutil @@ -573,8 +574,8 @@ class FreqaiDataDrawer: from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") elif self.model_type == 'stable_baselines': - mod = __import__('stable_baselines3', fromlist=[ - self.freqai_info['rl_config']['model_type']]) + mod = importlib.import_module( + 'stable_baselines3', self.freqai_info['rl_config']['model_type']) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") From 2e82e6784a1cb5b8ebee3bcca8ddbfcd7782a917 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 19:07:38 +0100 Subject: [PATCH 182/421] move data_provider cleanup to shutdown() --- freqtrade/freqai/freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 19d6b4faa..806dbf6f7 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -168,7 +168,6 @@ class IFreqaiModel(ABC): """ self.model = None self.dk = None - self.data_provider = None def _on_stop(self): """ @@ -185,6 +184,7 @@ class IFreqaiModel(ABC): logger.info("Stopping FreqAI") self._stop_event.set() + self.data_provider = None self._on_stop() logger.info("Waiting on Training iteration") From 101dec461e40c2b8ed15a7075bb4b7dc9099c7b2 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Thu, 24 Nov 2022 11:35:50 -0700 Subject: [PATCH 183/421] close 
ws channel if can't accept --- freqtrade/rpc/api_server/ws/channel.py | 56 ++++++++++++++------------ 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index ad183ce5b..7343bc306 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -125,9 +125,14 @@ class WebSocketChannel: async def accept(self): """ - Accept the underlying websocket connection + Accept the underlying websocket connection, + if the connection has been closed before we can + accept, just close the channel. """ - return await self._websocket.accept() + try: + return await self._websocket.accept() + except RuntimeError: + await self.close() async def close(self): """ @@ -172,17 +177,18 @@ class WebSocketChannel: :param **kwargs: Any extra kwargs to pass to gather """ - # Wrap the coros into tasks if they aren't already - self._channel_tasks = [ - task if isinstance(task, asyncio.Task) else asyncio.create_task(task) - for task in tasks - ] + if not self.is_closed(): + # Wrap the coros into tasks if they aren't already + self._channel_tasks = [ + task if isinstance(task, asyncio.Task) else asyncio.create_task(task) + for task in tasks + ] - try: - return await asyncio.gather(*self._channel_tasks, **kwargs) - except Exception: - # If an exception occurred, cancel the rest of the tasks - await self.cancel_channel_tasks() + try: + return await asyncio.gather(*self._channel_tasks, **kwargs) + except Exception: + # If an exception occurred, cancel the rest of the tasks + await self.cancel_channel_tasks() async def cancel_channel_tasks(self): """ @@ -191,19 +197,19 @@ class WebSocketChannel: for task in self._channel_tasks: task.cancel() - # Wait for tasks to finish cancelling - try: - await task - except ( - asyncio.CancelledError, - asyncio.TimeoutError, - WebSocketDisconnect, - ConnectionClosed, - RuntimeError - ): - pass - except Exception as e: - logger.info(f"Encountered unknown exception: {e}", exc_info=e) + # Wait for tasks to finish cancelling + try: + await task + except ( + asyncio.CancelledError, + asyncio.TimeoutError, + WebSocketDisconnect, + ConnectionClosed, + RuntimeError + ): + pass + except Exception as e: + logger.info(f"Encountered unknown exception: {e}", exc_info=e) self._channel_tasks = [] From fc59b02255e3b91e8329b6bf02517102b05d0996 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Thu, 24 Nov 2022 13:41:10 -0700 Subject: [PATCH 184/421] prevent ws endpoint from running without valid token --- freqtrade/rpc/api_server/api_auth.py | 2 -- freqtrade/rpc/api_server/api_ws.py | 11 ++++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/freqtrade/rpc/api_server/api_auth.py b/freqtrade/rpc/api_server/api_auth.py index ee66fce2b..71f1145a9 100644 --- a/freqtrade/rpc/api_server/api_auth.py +++ b/freqtrade/rpc/api_server/api_auth.py @@ -81,8 +81,6 @@ async def validate_ws_token( except HTTPException: pass - # No checks passed, deny the connection - logger.debug("Denying websocket request.") # If it doesn't match, close the websocket connection await ws.close(code=status.WS_1008_POLICY_VIOLATION) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index fe2968c05..77950923d 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -97,8 +97,9 @@ async def message_endpoint( rpc: RPC = Depends(get_rpc), message_stream: MessageStream = Depends(get_message_stream) ): - async with 
create_channel(websocket) as channel: - await channel.run_channel_tasks( - channel_reader(channel, rpc), - channel_broadcaster(channel, message_stream) - ) + if token: + async with create_channel(websocket) as channel: + await channel.run_channel_tasks( + channel_reader(channel, rpc), + channel_broadcaster(channel, message_stream) + ) From 391817243cab8ba944933e19c26280caebf1baf0 Mon Sep 17 00:00:00 2001 From: froggleston Date: Fri, 25 Nov 2022 16:12:15 +0000 Subject: [PATCH 185/421] Tidy up complex functions --- freqtrade/data/entryexitanalysis.py | 31 +++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/freqtrade/data/entryexitanalysis.py b/freqtrade/data/entryexitanalysis.py index b22c3f87e..10969431d 100755 --- a/freqtrade/data/entryexitanalysis.py +++ b/freqtrade/data/entryexitanalysis.py @@ -152,9 +152,30 @@ def _do_group_table_output(bigdf, glist): logger.warning("Invalid group mask specified.") +def _select_rows_within_dates(df, date_start=None, date_end=None): + if (date_start is not None): + df = df.loc[(df['date'] >= date_start)] + + if (date_end is not None): + df = df.loc[(df['date'] < date_end)] + + return df + + +def _select_rows_by_entry_exit_tags(df, enter_reason_list, exit_reason_list): + if enter_reason_list and "all" not in enter_reason_list: + df = df.loc[(df['enter_reason'].isin(enter_reason_list))] + + if exit_reason_list and "all" not in exit_reason_list: + df = df.loc[(df['exit_reason'].isin(exit_reason_list))] + + return df + + def _print_results(analysed_trades, stratname, analysis_groups, enter_reason_list, exit_reason_list, - indicator_list, columns=None): + indicator_list, columns=None, + date_start=None, date_end=None): if columns is None: columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason'] @@ -162,15 +183,13 @@ def _print_results(analysed_trades, stratname, analysis_groups, for pair, trades in analysed_trades[stratname].items(): bigdf = pd.concat([bigdf, trades], ignore_index=True) + bigdf = _select_rows_within_dates(bigdf, date_start, date_end) + if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns): if analysis_groups: _do_group_table_output(bigdf, analysis_groups) - if enter_reason_list and "all" not in enter_reason_list: - bigdf = bigdf.loc[(bigdf['enter_reason'].isin(enter_reason_list))] - - if exit_reason_list and "all" not in exit_reason_list: - bigdf = bigdf.loc[(bigdf['exit_reason'].isin(exit_reason_list))] + bigdf = _select_rows_by_entry_exit_tags(bigdf, enter_reason_list, exit_reason_list) if "all" in indicator_list: print(bigdf) From afc00bc30a94abd64fee000535e66287fd91595f Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 12:48:57 -0700 Subject: [PATCH 186/421] log warning if channel too far behind, add docstrings to message stream --- freqtrade/rpc/api_server/api_ws.py | 11 +++++++++- freqtrade/rpc/api_server/ws/channel.py | 12 ++++++----- freqtrade/rpc/api_server/ws/message_stream.py | 21 ++++++++++++------- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 77950923d..a80250c1b 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -1,4 +1,5 @@ import logging +import time from typing import Any, Dict from fastapi import APIRouter, Depends @@ -33,8 +34,16 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message """ Iterate over messages in the message stream and send them """ - async for 
message in message_stream: + async for message, ts in message_stream: if channel.subscribed_to(message.get('type')): + # Log a warning if this channel is behind + # on the message stream by a lot + if (time.time() - ts) > 60: + logger.warning("Channel {channel} is behind MessageStream by 1 minute," + " this can cause a memory leak if you see this message" + " often, consider reducing pair list size or amount of" + " consumers.") + await channel.send(message, timeout=True) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 7343bc306..a5f3b6216 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -59,6 +59,10 @@ class WebSocketChannel: def remote_addr(self): return self._websocket.remote_addr + @property + def avg_send_time(self): + return sum(self._send_times) / len(self._send_times) + def _calc_send_limit(self): """ Calculate the send high limit for this channel @@ -66,11 +70,9 @@ class WebSocketChannel: # Only update if we have enough data if len(self._send_times) == self._send_times.maxlen: - # At least 1s or twice the average of send times - self._send_high_limit = max( - (sum(self._send_times) / len(self._send_times)) * 2, - 1 - ) + # At least 1s or twice the average of send times, with a + # maximum of 3 seconds per message + self._send_high_limit = min(max(self.avg_send_time * 2, 1), 3) async def send( self, diff --git a/freqtrade/rpc/api_server/ws/message_stream.py b/freqtrade/rpc/api_server/ws/message_stream.py index 9592908ab..a55a0da3c 100644 --- a/freqtrade/rpc/api_server/ws/message_stream.py +++ b/freqtrade/rpc/api_server/ws/message_stream.py @@ -1,4 +1,5 @@ import asyncio +import time class MessageStream: @@ -11,14 +12,20 @@ class MessageStream: self._waiter = self._loop.create_future() def publish(self, message): - waiter, self._waiter = self._waiter, self._loop.create_future() - waiter.set_result((message, self._waiter)) + """ + Publish a message to this MessageStream - async def subscribe(self): + :param message: The message to publish + """ + waiter, self._waiter = self._waiter, self._loop.create_future() + waiter.set_result((message, time.time(), self._waiter)) + + async def __aiter__(self): + """ + Iterate over the messages in the message stream + """ waiter = self._waiter while True: # Shield the future from being cancelled by a task waiting on it - message, waiter = await asyncio.shield(waiter) - yield message - - __aiter__ = subscribe + message, ts, waiter = await asyncio.shield(waiter) + yield message, ts From f268187e9b357127151ae45704538aed6c89f7f5 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 12:56:33 -0700 Subject: [PATCH 187/421] offload initial df computation to thread --- freqtrade/misc.py | 43 ++++++++++++++++++++++++++++++ freqtrade/rpc/api_server/api_ws.py | 3 ++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/freqtrade/misc.py b/freqtrade/misc.py index 2d2c7513a..349735dcd 100644 --- a/freqtrade/misc.py +++ b/freqtrade/misc.py @@ -1,9 +1,11 @@ """ Various tool function for Freqtrade and scripts """ +import asyncio import gzip import logging import re +import threading from datetime import datetime from pathlib import Path from typing import Any, Dict, Iterator, List, Mapping, Union @@ -301,3 +303,44 @@ def remove_entry_exit_signals(dataframe: pd.DataFrame): dataframe[SignalTagType.EXIT_TAG.value] = None return dataframe + + +def sync_to_async_iter(iter): + """ + Wrap blocking iterator into an asynchronous by + offloading 
computation to thread and using + pubsub pattern for yielding results + + :param iter: A synchronous iterator + :returns: An asynchronous iterator + """ + + loop = asyncio.get_event_loop() + q = asyncio.Queue(1) + exception = None + _END = object() + + async def yield_queue_items(): + while True: + next_item = await q.get() + if next_item is _END: + break + yield next_item + if exception is not None: + # The iterator has raised, propagate the exception + raise exception + + def iter_to_queue(): + nonlocal exception + try: + for item in iter: + # This runs outside the event loop thread, so we + # must use thread-safe API to talk to the queue. + asyncio.run_coroutine_threadsafe(q.put(item), loop).result() + except Exception as e: + exception = e + finally: + asyncio.run_coroutine_threadsafe(q.put(_END), loop).result() + + threading.Thread(target=iter_to_queue).start() + return yield_queue_items() diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index a80250c1b..6ecc1ef2a 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -7,6 +7,7 @@ from fastapi.websockets import WebSocket from pydantic import ValidationError from freqtrade.enums import RPCMessageType, RPCRequestType +from freqtrade.misc import sync_to_async_iter from freqtrade.rpc.api_server.api_auth import validate_ws_token from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel @@ -93,7 +94,7 @@ async def _process_consumer_request( limit = min(data.get('limit', 1500), 1500) if data else None # For every pair in the generator, send a separate message - for message in rpc._ws_request_analyzed_df(limit): + async for message in sync_to_async_iter(rpc._ws_request_analyzed_df(limit)): # Format response response = WSAnalyzedDFMessage(data=message) await channel.send(response.dict(exclude_none=True)) From 4aa4c6f49d27aa724ec8a120003c20215aa90195 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 13:08:41 -0700 Subject: [PATCH 188/421] change sleep in channel send to 0 --- freqtrade/rpc/api_server/ws/channel.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index a5f3b6216..76e48d889 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -104,14 +104,9 @@ class WebSocketChannel: logger.info(f"Connection for {self} timed out, disconnecting") raise - # Without this sleep, messages would send to one channel - # first then another after the first one finished and prevent - # any normal Rest API calls from processing at the same time. - # With the sleep call, it gives control to the event - # loop to schedule other channel send methods, and helps - # throttle how fast we send. 
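For reference, a small usage sketch of the `sync_to_async_iter()` helper added above — the toy generator is a stand-in assumption for a blocking producer such as `rpc._ws_request_analyzed_df()`:

```python
import asyncio

from freqtrade.misc import sync_to_async_iter


def blocking_generator():
    # stand-in for a slow, synchronous producer
    # (e.g. building analyzed dataframe messages)
    for i in range(3):
        yield {"type": "analyzed_df", "seq": i}


async def consume():
    # iteration is offloaded to a thread, so the event loop stays responsive
    async for message in sync_to_async_iter(blocking_generator()):
        print(message)


asyncio.run(consume())
```

The queue size of 1 inside the helper keeps the producing thread in lock-step with the consumer, so a slow websocket client cannot cause unbounded buffering.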
- # 0.01 = 100 messages/second max throughput - await asyncio.sleep(0.01) + # Explicitly give control back to event loop as + # websockets.send does not + await asyncio.sleep(0) async def recv(self): """ From bd95392eea3c4cdae7c5f97557a359599664ba34 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 13:10:22 -0700 Subject: [PATCH 189/421] fix formatted string in warning message :) --- freqtrade/rpc/api_server/api_ws.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 6ecc1ef2a..9e7bb17a4 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -40,7 +40,7 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message # Log a warning if this channel is behind # on the message stream by a lot if (time.time() - ts) > 60: - logger.warning("Channel {channel} is behind MessageStream by 1 minute," + logger.warning(f"Channel {channel} is behind MessageStream by 1 minute," " this can cause a memory leak if you see this message" " often, consider reducing pair list size or amount of" " consumers.") From 9f13d99b999047237055d0650812e58fef127ab5 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 11:32:39 +0100 Subject: [PATCH 190/421] improve parameter table, add better documentation for custom calculate_reward, add various helpful notes in docstrings etc --- docs/freqai-parameter-table.md | 37 ++++++--- docs/freqai-reinforcement-learning.md | 111 ++++++++++++++++---------- 2 files changed, 97 insertions(+), 51 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 9e16aec8f..0a71f3ec9 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -6,7 +6,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | Parameter | Description | |------------|-------------| -| | **General configuration parameters** +| | **General configuration parameters within the `config.freqai` tree** | `freqai` | **Required.**
The parent dictionary containing all the parameters for controlling FreqAI.
**Datatype:** Dictionary. | `train_period_days` | **Required.**
Number of days to use for the training data (width of the sliding window).
**Datatype:** Positive integer. | `backtest_period_days` | **Required.**
Number of days over which to run inference with the trained model before sliding the `train_period_days` window defined above, and retraining the model during backtesting (more info [here](freqai-running.md#backtesting)). This can be fractional days, but beware that the provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest.
**Datatype:** Float. @@ -20,7 +20,11 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`. | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file.
**Datatype:** Boolean.
Default: `False` | `data_kitchen_thread_count` |
Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If the user does not set it (default), FreqAI will use the maximum number of threads minus 2 (leaving 1 physical core available for the Freqtrade bot and FreqUI).
**Datatype:** Positive integer. -| | **Feature parameters** + + +| Parameter | Description | +|------------|-------------| +| | **Feature parameters within the `freqai.feature_parameters` sub dictionary** | `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md).
**Datatype:** Dictionary. | `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset.
**Datatype:** List of timeframes (strings). | `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` during feature engineering (see details [here](freqai-feature-engineering.md)) will be created for each correlated coin. The correlated coins features are added to the base indicators dataset.
**Datatype:** List of assets (strings). @@ -39,16 +43,28 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation).
**Datatype:** Float.
Default: `0`. | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset.
**Datatype:** Float.
Default: `30`. | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it.
**Datatype:** Boolean.
Default: `False` (no reversal). -| | **Data split parameters** + + +| Parameter | Description | +|------------|-------------| +| | **Data split parameters within the `freqai.data_split_parameters` sub dictionary** | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
**Datatype:** Dictionary. | `test_size` | The fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. | `shuffle` | Shuffle the training data points during training. Typically, to not remove the chronological order of data in time-series forecasting, this is set to `False`.
**Datatype:** Boolean.
Defaut: `False`. -| | **Model training parameters** + + +| Parameter | Description | +|------------|-------------| +| | **Model training parameters within the `freqai.model_training_parameters` sub dictionary** | `model_training_parameters` | A flexible dictionary that includes all parameters available by the selected model library. For example, if you use `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If you select a different model, this dictionary can contain any parameter from that model. A list of the currently available models can be found [here](freqai-configuration.md#using-different-prediction-models).
**Datatype:** Dictionary. | `n_estimators` | The number of boosted trees to fit in the training of the model.
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during training of the model.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. -| | **Reinforcement Learning Parameters** + + +| Parameter | Description | +|------------|-------------| +| | **Reinforcement Learning Parameters within the `freqai.rl_config` sub dictionary** | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
| `train_cycles` | Training time steps will be set based on the `train_cycles * number of training data points`.
**Datatype:** Integer. | `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int.
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. | `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training.
**Datatype:** float.
Default: 0.8 -| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not).
**Datatype:** int. +| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). Recommended to leave this untouched, by default, this value is set to the total number of physical cores minus 1.
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. + +| Parameter | Description | +|------------|-------------| | | **Extraneous parameters** -| `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. -| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: `2`. -| `reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI).
**Datatype:** Boolean.
Default: `False`. +| `freqai.keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. +| `freqai.conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: `2`. +| `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI).
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 0e4388cf1..48118bb2a 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -154,55 +154,82 @@ In order to configure the `Reinforcement Learner` the following dictionary must Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). +!!! Note + If you would like to experiment with `continual_learning`, then you should set that value to `true` in the main `freqai` configuration dictionary. This will tell the Reinforcement Learning library to continue training new models from the final state of previous models, instead of retraining new models from scratch each time a retrain is initiated. + !!! Note Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html). -## Creating the reward +## Creating a custom reward function -As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: +As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but users are encouraged to create their own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: ```python - class MyRLEnv(Base5ActionRLEnv): - """ - User made custom environment. This class inherits from BaseEnvironment and gym.env. 
- Users can override any functions from those parent classes. Here is an example - of a user customized `calculate_reward()` function. - """ - def calculate_reward(self, action): - # first, penalize if the action is not valid - if not self._is_valid(action): - return -2 - pnl = self.get_unrealized_profit() + import from freqtrade.freqai.prediction_models ReinforcementLearner import ReinforcementLearner - factor = 100 - # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: - return 25 - # discourage agent from not entering trades - if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -1 - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick - if trade_duration <= max_trade_duration: - factor *= 1.5 - elif trade_duration > max_trade_duration: - factor *= 0.5 - # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and \ - action == Actions.Neutral.value: - return -1 * trade_duration / max_trade_duration - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) - return 0. + class MyCoolRLModel(ReinforcementLearner): + """ + User created RL prediction model. + + Save this file to `freqtrade/user_data/freqaimodels` + + then use it with: + + freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat + + Here the users can override any of the functions + available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this + is where the user overrides `MyRLEnv` (see below), to define custom + `calculate_reward()` function, or to override any other parts of the environment. + + This class also allows users to override any other part of the IFreqaiModel tree. + For example, the user can override `def fit()` or `def train()` or `def predict()` + to take fine-tuned control over these processes. + + Another common override may be `def data_cleaning_predict()` where the user can + take fine-tuned control over the data handling pipeline. + """ + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. 
+ """ + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + pnl = self.get_unrealized_profit() + + factor = 100 + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) + return 0. ``` ### Using Tensorboard From 8dbfd2cacfcd3dcabf2e4e5b3eddf84269e850f9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 11:51:08 +0100 Subject: [PATCH 191/421] improve docstring clarity about how to inherit from ReinforcementLearner, demonstrate inherittance with ReinforcementLearner_multiproc --- .../prediction_models/ReinforcementLearner.py | 27 ++++++++++- .../ReinforcementLearner_multiproc.py | 45 ++----------------- 2 files changed, 30 insertions(+), 42 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 063af5ff5..dcf7cf54b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -14,7 +14,32 @@ logger = logging.getLogger(__name__) class ReinforcementLearner(BaseReinforcementLearningModel): """ - User created Reinforcement Learning Model prediction model. + Reinforcement Learning Model prediction model. + + Users can inherit from this class to make their own RL model with custom + environment/training controls. Define the file as follows: + + ``` + from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner + + class MyCoolRLModel(ReinforcementLearner): + ``` + + Save the file to `user_data/freqaimodels`, then run it with: + + freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat + + Here the users can override any of the functions + available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this + is where the user overrides `MyRLEnv` (see below), to define custom + `calculate_reward()` function, or to override any other parts of the environment. + + This class also allows users to override any other part of the IFreqaiModel tree. + For example, the user can override `def fit()` or `def train()` or `def predict()` + to take fine-tuned control over these processes. + + Another common override may be `def data_cleaning_predict()` where the user can + take fine-tuned control over the data handling pipeline. 
""" def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index baba16066..56636c1f6 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -1,61 +1,24 @@ import logging -from pathlib import Path from typing import Any, Dict # , Tuple # import numpy.typing as npt -import torch as th from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, - make_env) +from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner +from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env logger = logging.getLogger(__name__) -class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): +class ReinforcementLearner_multiproc(ReinforcementLearner): """ - User created Reinforcement Learning Model prediction model. + Demonstration of how to build vectorized environments """ - def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): - - train_df = data_dictionary["train_features"] - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=self.net_arch) - - if dk.pair not in self.dd.model_dictionary or not self.continual_learning: - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path( - dk.full_path / "tensorboard" / dk.pair.split('/')[0]), - **self.freqai_info['model_training_parameters'] - ) - else: - logger.info('Continual learning activated - starting training from previously ' - 'trained agent.') - model = self.dd.model_dictionary[dk.pair] - model.set_env(self.train_env) - - model.learn( - total_timesteps=int(total_timesteps), - callback=self.eval_callback - ) - - if Path(dk.data_path / "best_model.zip").is_file(): - logger.info('Callback found a best model.') - best_model = self.MODELCLASS.load(dk.data_path / "best_model") - return best_model - - logger.info('Couldnt find best model, using final model instead.') - - return model - def set_train_and_eval_environments(self, data_dictionary: Dict[str, Any], prices_train: DataFrame, prices_test: DataFrame, dk: FreqaiDataKitchen): From 81fd2e588ff8f97225f45071c59a46d42c88a269 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 12:11:59 +0100 Subject: [PATCH 192/421] ensure typing, remove unsued code --- docs/freqai-reinforcement-learning.md | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 +- freqtrade/freqai/RL/BaseEnvironment.py | 74 ++++++++++--------- .../prediction_models/ReinforcementLearner.py | 6 +- .../ReinforcementLearner_test_4ac.py | 6 +- 5 files changed, 46 insertions(+), 44 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 48118bb2a..2a1ffc250 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -195,7 +195,7 @@ As you begin to modify the strategy and the prediction model, you will quickly r Users can override any functions from those parent classes. 
Here is an example of a user customized `calculate_reward()` function. """ - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: # first, penalize if the action is not valid if not self._is_valid(action): return -2 diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 0d7672b2f..8012ff1af 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -158,7 +158,7 @@ class Base5ActionRLEnv(BaseEnvironment): (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) - def _is_valid(self, action: int): + def _is_valid(self, action: int) -> bool: # trade signal """ Determine if the signal is valid. diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 6853377cb..7aa571697 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -208,13 +208,13 @@ class BaseEnvironment(gym.Env): """ return - def _is_valid(self, action: int): + def _is_valid(self, action: int) -> bool: """ Determine if the signal is valid.This is unique to the actions in the environment, and therefore must be inherited. """ - return + return True def add_entry_fee(self, price): return price * (1 + self.fee) @@ -230,7 +230,7 @@ class BaseEnvironment(gym.Env): self.history[key].append(value) @abstractmethod - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. @@ -263,38 +263,40 @@ class BaseEnvironment(gym.Env): # assumes unit stake and no compounding self._total_profit += pnl - def most_recent_return(self, action: int): - """ - Calculate the tick to tick return if in a trade. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_entry_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_exit_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - def current_price(self) -> float: return self.prices.iloc[self._current_tick].open + + # Keeping around incase we want to start building more complex environment + # templates in the future. + # def most_recent_return(self): + # """ + # Calculate the tick to tick return if in a trade. + # Return is generated from rising prices in Long + # and falling prices in Short positions. 
+ # The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. + # """ + # # Long positions + # if self._position == Positions.Long: + # current_price = self.prices.iloc[self._current_tick].open + # previous_price = self.prices.iloc[self._current_tick - 1].open + + # if (self._position_history[self._current_tick - 1] == Positions.Short + # or self._position_history[self._current_tick - 1] == Positions.Neutral): + # previous_price = self.add_entry_fee(previous_price) + + # return np.log(current_price) - np.log(previous_price) + + # # Short positions + # if self._position == Positions.Short: + # current_price = self.prices.iloc[self._current_tick].open + # previous_price = self.prices.iloc[self._current_tick - 1].open + # if (self._position_history[self._current_tick - 1] == Positions.Long + # or self._position_history[self._current_tick - 1] == Positions.Neutral): + # previous_price = self.add_exit_fee(previous_price) + + # return np.log(previous_price) - np.log(current_price) + + # return 0 + + # def update_portfolio_log_returns(self, action): + # self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index dcf7cf54b..61b01e21b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -89,7 +89,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): sets a custom reward based on profit and trade duration. """ - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. @@ -103,7 +103,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return -2 pnl = self.get_unrealized_profit() - factor = 100 + factor = 100. # reward agent for entering trades if (action in (Actions.Long_enter.value, Actions.Short_enter.value) @@ -114,7 +114,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: factor *= 1.5 diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 9861acfd8..29e3e3b64 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -20,7 +20,7 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): sets a custom reward based on profit and trade duration. """ - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: # first, penalize if the action is not valid if not self._is_valid(action): @@ -28,7 +28,7 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) - factor = 100 + factor = 100. 
# reward agent for entering trades if (action in (Actions.Long_enter.value, Actions.Short_enter.value) @@ -39,7 +39,7 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: factor *= 1.5 From bdfedb5fcb02b88c600ef25c88bbb5d939b8bd0a Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:03:07 +0100 Subject: [PATCH 193/421] Improve typehints / reduce warnings from mypy --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 4 ++-- freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 +- freqtrade/freqai/RL/BaseEnvironment.py | 4 ++-- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 0c719ea92..1a235801c 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -103,7 +103,7 @@ class Base4ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info - def is_tradesignal(self, action: int): + def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short @@ -117,7 +117,7 @@ class Base4ActionRLEnv(BaseEnvironment): (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short)) - def _is_valid(self, action: int): + def _is_valid(self, action: int) -> bool: """ Determine if the signal is valid. e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 8012ff1af..61abb8031 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -141,7 +141,7 @@ class Base5ActionRLEnv(BaseEnvironment): else: return self._current_tick - self._last_trade_tick - def is_tradesignal(self, action: int): + def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 7aa571697..3332e5a18 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -200,13 +200,13 @@ class BaseEnvironment(gym.Env): return 0. @abstractmethod - def is_tradesignal(self, action: int): + def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal. This is unique to the actions in the environment, and therefore must be inherited. 
""" - return + return True def _is_valid(self, action: int) -> bool: """ diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index bddac23b3..af9874d90 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -38,7 +38,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): User created Reinforcement Learning Model prediction class """ - def __init__(self, **kwargs): + def __init__(self, **kwargs) -> None: super().__init__(config=kwargs['config']) self.max_threads = min(self.freqai_info['rl_config'].get( 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) From cf2f12b47277ad8289cb92a67e8a198cf0cb59e4 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:06:21 +0100 Subject: [PATCH 194/421] Headers between Tables -> Tables can be jumped to directly --- docs/freqai-parameter-table.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 0a71f3ec9..084c9118c 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -4,6 +4,8 @@ The table below will list all configuration parameters available for FreqAI. Som Mandatory parameters are marked as **Required** and have to be set in one of the suggested ways. +### General configuration parameters + | Parameter | Description | |------------|-------------| | | **General configuration parameters within the `config.freqai` tree** @@ -21,6 +23,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file.
**Datatype:** Boolean.
Default: `False` | `data_kitchen_thread_count` |
Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If the user does not set it (default), FreqAI will use the maximum number of threads minus 2 (leaving 1 physical core available for the Freqtrade bot and FreqUI).
**Datatype:** Positive integer. +### Feature parameters | Parameter | Description | |------------|-------------| @@ -44,6 +47,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset.
**Datatype:** Float.
Default: `30`. | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it.
**Datatype:** Boolean.
Default: `False` (no reversal). +### Data split parameters | Parameter | Description | |------------|-------------| @@ -52,6 +56,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `test_size` | The fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. | `shuffle` | Shuffle the training data points during training. Typically, to not remove the chronological order of data in time-series forecasting, this is set to `False`.
**Datatype:** Boolean.
Defaut: `False`. +### Model training parameters | Parameter | Description | |------------|-------------| @@ -61,6 +66,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `learning_rate` | Boosting learning rate during training of the model.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. +### Reinforcement Learning parameters | Parameter | Description | |------------|-------------| @@ -76,6 +82,8 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. +### Additional parameters + | Parameter | Description | |------------|-------------| | | **Extraneous parameters** From 8660ac9aa0010950a1f9227fe6c6048b3524ba84 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:12:44 +0100 Subject: [PATCH 195/421] Fix import in docs --- docs/freqai-reinforcement-learning.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 2a1ffc250..6bcba96ff 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -165,7 +165,8 @@ Parameter details can be found [here](freqai-parameter-table.md), but in general As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but users are encouraged to create their own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: ```python - import from freqtrade.freqai.prediction_models ReinforcementLearner import ReinforcementLearner + from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner + from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv class MyCoolRLModel(ReinforcementLearner): """ From 7ebc8ee169afc3f8668e682c09712560152eb5d3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:32:18 +0100 Subject: [PATCH 196/421] Fix missing Optional typehint --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index af9874d90..709ded048 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -3,7 +3,7 @@ import logging from abc import abstractmethod from datetime import datetime, timezone from pathlib import Path -from typing import Any, Callable, Dict, Tuple, Type, Union +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union import gym import numpy as np @@ -46,7 +46,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None self.eval_env: Union[SubprocVecEnv, gym.Env] = None - self.eval_callback: EvalCallback = None + self.eval_callback: Optional[EvalCallback] = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] self.continual_learning = self.freqai_info.get('continual_learning', False) From 7b0a76fb7010eac44d7000626d9f167201b87f1a Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 
Nov 2022 10:41:37 +0100 Subject: [PATCH 197/421] Improve typehint --- freqtrade/rpc/api_server/webserver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index e4eb3895d..92bded1c5 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -1,6 +1,6 @@ import logging from ipaddress import IPv4Address -from typing import Any, Dict +from typing import Any, Dict, Optional import orjson import uvicorn @@ -46,7 +46,7 @@ class ApiServer(RPCHandler): # Exchange - only available in webserver mode. _exchange = None # websocket message stuff - _message_stream = None + _message_stream: Optional[MessageStream] = None def __new__(cls, *args, **kwargs): """ From fcf13580f14aea8e889eaf1af82140eb17596d5c Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:33:54 +0100 Subject: [PATCH 198/421] Revert "offload initial df computation to thread" This reverts commit f268187e9b357127151ae45704538aed6c89f7f5. --- freqtrade/misc.py | 43 ------------------------------ freqtrade/rpc/api_server/api_ws.py | 3 +-- 2 files changed, 1 insertion(+), 45 deletions(-) diff --git a/freqtrade/misc.py b/freqtrade/misc.py index 349735dcd..2d2c7513a 100644 --- a/freqtrade/misc.py +++ b/freqtrade/misc.py @@ -1,11 +1,9 @@ """ Various tool function for Freqtrade and scripts """ -import asyncio import gzip import logging import re -import threading from datetime import datetime from pathlib import Path from typing import Any, Dict, Iterator, List, Mapping, Union @@ -303,44 +301,3 @@ def remove_entry_exit_signals(dataframe: pd.DataFrame): dataframe[SignalTagType.EXIT_TAG.value] = None return dataframe - - -def sync_to_async_iter(iter): - """ - Wrap blocking iterator into an asynchronous by - offloading computation to thread and using - pubsub pattern for yielding results - - :param iter: A synchronous iterator - :returns: An asynchronous iterator - """ - - loop = asyncio.get_event_loop() - q = asyncio.Queue(1) - exception = None - _END = object() - - async def yield_queue_items(): - while True: - next_item = await q.get() - if next_item is _END: - break - yield next_item - if exception is not None: - # The iterator has raised, propagate the exception - raise exception - - def iter_to_queue(): - nonlocal exception - try: - for item in iter: - # This runs outside the event loop thread, so we - # must use thread-safe API to talk to the queue. 
- asyncio.run_coroutine_threadsafe(q.put(item), loop).result() - except Exception as e: - exception = e - finally: - asyncio.run_coroutine_threadsafe(q.put(_END), loop).result() - - threading.Thread(target=iter_to_queue).start() - return yield_queue_items() diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 9e7bb17a4..e183cd7e7 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -7,7 +7,6 @@ from fastapi.websockets import WebSocket from pydantic import ValidationError from freqtrade.enums import RPCMessageType, RPCRequestType -from freqtrade.misc import sync_to_async_iter from freqtrade.rpc.api_server.api_auth import validate_ws_token from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel @@ -94,7 +93,7 @@ async def _process_consumer_request( limit = min(data.get('limit', 1500), 1500) if data else None # For every pair in the generator, send a separate message - async for message in sync_to_async_iter(rpc._ws_request_analyzed_df(limit)): + for message in rpc._ws_request_analyzed_df(limit): # Format response response = WSAnalyzedDFMessage(data=message) await channel.send(response.dict(exclude_none=True)) From aaaa5a5f64dd4b1dec7d81fa0f1e7e2ede11f963 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 13:44:03 +0100 Subject: [PATCH 199/421] add documentation for net_arch, other small changes --- docs/freqai-parameter-table.md | 1 + docs/freqai-reinforcement-learning.md | 2 +- freqtrade/constants.py | 1 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 31 ------------------------- 4 files changed, 3 insertions(+), 32 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 084c9118c..02426ec13 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -81,6 +81,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). Recommended to leave this untouched, by default, this value is set to the total number of physical cores minus 1.
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. +| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[, dict(vf=[], pi=[])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each. ### Additional parameters diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 6bcba96ff..241ccc3e2 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -34,7 +34,7 @@ Setting up and running a Reinforcement Learning model is the same as running a R freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json ``` -where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner`. The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor: +where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner` (or a custom user defined one located in `user_data/freqaimodels`). The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor: ```python def populate_any_indicators( diff --git a/freqtrade/constants.py b/freqtrade/constants.py index ba43e1328..3d7dbb13e 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -590,6 +590,7 @@ CONF_SCHEMA = { "cpu_count": {"type": "integer", "default": 1}, "model_type": {"type": "string", "default": "PPO"}, "policy_type": {"type": "string", "default": "MlpPolicy"}, + "net_arch": {"type": "list", "default": [128, 128]}, "model_reward_parameters": { "type": "object", "properties": { diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 61abb8031..ee43ac868 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -26,31 +26,6 @@ class Base5ActionRLEnv(BaseEnvironment): def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. 
# unit - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self._total_unrealized_profit = 1 - - return self._get_observation() - def step(self, action: int): """ Logic for a single step (incrementing one candle in time) @@ -135,12 +110,6 @@ class Base5ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info - def get_trade_duration(self): - if self._last_trade_tick is None: - return 0 - else: - return self._current_tick - self._last_trade_tick - def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal From be890b52fdb5afaaf12c1e8b7c0be52f83522935 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 13:44:48 +0100 Subject: [PATCH 200/421] remove np import --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 1 - 1 file changed, 1 deletion(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index ee43ac868..68b2e011b 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -1,7 +1,6 @@ import logging from enum import Enum -import numpy as np from gym import spaces from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions From b52f05923aac4fed453f03e3eae133884909038f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 13:47:47 +0100 Subject: [PATCH 201/421] fix list to array in constants.py --- freqtrade/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 3d7dbb13e..878c38929 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -590,7 +590,7 @@ CONF_SCHEMA = { "cpu_count": {"type": "integer", "default": 1}, "model_type": {"type": "string", "default": "PPO"}, "policy_type": {"type": "string", "default": "MlpPolicy"}, - "net_arch": {"type": "list", "default": [128, 128]}, + "net_arch": {"type": "array", "default": [128, 128]}, "model_reward_parameters": { "type": "object", "properties": { From a26b3a9ca8031753f406df690abd638b09ca8d31 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sat, 26 Nov 2022 09:40:22 -0700 Subject: [PATCH 202/421] change sleep call back to 0.01 --- freqtrade/rpc/api_server/ws/channel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 76e48d889..c50aff8be 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -106,7 +106,7 @@ class WebSocketChannel: # Explicitly give control back to event loop as # websockets.send does not - await asyncio.sleep(0) + await asyncio.sleep(0.01) async def recv(self): """ From 4790aaaae1eaa85657674d91b48621539af77711 Mon Sep 17 00:00:00 2001 From: froggleston Date: Sat, 26 Nov 2022 16:58:56 +0000 Subject: [PATCH 203/421] Implement cli options for backtesting-analysis date filtering --- docs/advanced-backtesting.md | 15 ++++ docs/utils.md | 8 ++ freqtrade/commands/analyze_commands.py | 8 +- freqtrade/commands/arguments.py | 3 +- freqtrade/commands/cli_options.py | 10 +++ freqtrade/configuration/configuration.py | 6 ++ freqtrade/data/entryexitanalysis.py | 95 ++++++++++++++---------- tests/data/test_entryexitanalysis.py | 9 +++ 8 files changed, 107 insertions(+), 47 deletions(-) diff --git a/docs/advanced-backtesting.md b/docs/advanced-backtesting.md index 5c2500f18..78e692f84 100644 --- 
a/docs/advanced-backtesting.md +++ b/docs/advanced-backtesting.md @@ -100,3 +100,18 @@ freqtrade backtesting-analysis -c --analysis-groups 0 2 --enter-re The indicators have to be present in your strategy's main DataFrame (either for your main timeframe or for informative timeframes) otherwise they will simply be ignored in the script output. + +### Filtering the trade output by date + +To show only trades between dates within your backtested timerange, supply the following option(s) in YYYYMMDD format: + +``` +--analysis-date-start : Start date to filter output trades, inclusive. e.g. 20220101 +--analysis-date-end : End date to filter output trades, exclusive. e.g. 20220131 +``` + +For example, if your backtest timerange was `20220101-20221231` but you only want to output trades in January: + +```bash +freqtrade backtesting-analysis -c --analysis-date-start 20220101 --analysis-date-end 20220201 +``` diff --git a/docs/utils.md b/docs/utils.md index 3d8a3bd03..e88a13a9a 100644 --- a/docs/utils.md +++ b/docs/utils.md @@ -722,6 +722,8 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V] [--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]] [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]] [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]] + [--analysis-date-start YYYYMMDD] + [--analysis-date-end YYYYMMDD] optional arguments: -h, --help show this help message and exit @@ -744,6 +746,12 @@ optional arguments: --indicator-list INDICATOR_LIST [INDICATOR_LIST ...] Comma separated list of indicators to analyse. e.g. 'close,rsi,bb_lowerband,profit_abs' + --analysis-date-start YYYYMMDD + Start date to filter trades for analysis (inclusive). e.g. + 20220101 + --analysis-date-end YYYYMMDD + End date to filter trades for analysis (exclusive). e.g. + 20220131 Common arguments: -v, --verbose Verbose mode (-vv for more, -vvv to get all messages). 
diff --git a/freqtrade/commands/analyze_commands.py b/freqtrade/commands/analyze_commands.py index b6b790788..20afa7ffd 100755 --- a/freqtrade/commands/analyze_commands.py +++ b/freqtrade/commands/analyze_commands.py @@ -60,10 +60,4 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None: logger.info('Starting freqtrade in analysis mode') - process_entry_exit_reasons(config['exportfilename'], - config['exchange']['pair_whitelist'], - config['analysis_groups'], - config['enter_reason_list'], - config['exit_reason_list'], - config['indicator_list'] - ) + process_entry_exit_reasons(config) diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py index 79ab9dafa..159b18439 100644 --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -106,7 +106,8 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop "disableparamexport", "backtest_breakdown"] ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list", - "exit_reason_list", "indicator_list"] + "exit_reason_list", "indicator_list", + "analysis_date_start", "analysis_date_end"] NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes", "list-markets", "list-pairs", "list-strategies", "list-freqaimodels", diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index 91ac16365..0592b0e53 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -658,6 +658,16 @@ AVAILABLE_CLI_OPTIONS = { nargs='+', default=[], ), + "analysis_date_start": Arg( + "--analysis-date-start", + help=("Start date to filter trades for analysis (inclusive). " + "e.g. '20220101'"), + ), + "analysis_date_end": Arg( + "--analysis-date-end", + help=("End date to filter trades for analysis (exclusive). " + "e.g. '20220131'"), + ), "freqaimodel": Arg( '--freqaimodel', help='Specify a custom freqaimodels.', diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py index 4929c023d..4e8abf48e 100644 --- a/freqtrade/configuration/configuration.py +++ b/freqtrade/configuration/configuration.py @@ -462,6 +462,12 @@ class Configuration: self._args_to_config(config, argname='indicator_list', logstring='Analysis indicator list: {}') + self._args_to_config(config, argname='analysis_date_start', + logstring='Analysis filter start date: {}') + + self._args_to_config(config, argname='analysis_date_end', + logstring='Analysis filter end date: {}') + def _process_runmode(self, config: Config) -> None: self._args_to_config(config, argname='dry_run', diff --git a/freqtrade/data/entryexitanalysis.py b/freqtrade/data/entryexitanalysis.py index 10969431d..77f14d0c6 100755 --- a/freqtrade/data/entryexitanalysis.py +++ b/freqtrade/data/entryexitanalysis.py @@ -1,11 +1,12 @@ import logging +from datetime import datetime from pathlib import Path -from typing import List, Optional import joblib import pandas as pd from tabulate import tabulate +from freqtrade.constants import Config from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data, load_backtest_stats) from freqtrade.exceptions import OperationalException @@ -153,55 +154,64 @@ def _do_group_table_output(bigdf, glist): def _select_rows_within_dates(df, date_start=None, date_end=None): + dtfmt = "%Y%m%d" + try: + bool(datetime.strptime(date_start, dtfmt)) + bool(datetime.strptime(date_end, dtfmt)) + except ValueError: + logger.error("Invalid start and/or end date provided. 
Use YYYYMMDD.") + return None + except TypeError: + return df + if (date_start is not None): df = df.loc[(df['date'] >= date_start)] if (date_end is not None): df = df.loc[(df['date'] < date_end)] - return df -def _select_rows_by_entry_exit_tags(df, enter_reason_list, exit_reason_list): +def _select_rows_by_tags(df, enter_reason_list, exit_reason_list): if enter_reason_list and "all" not in enter_reason_list: df = df.loc[(df['enter_reason'].isin(enter_reason_list))] if exit_reason_list and "all" not in exit_reason_list: df = df.loc[(df['exit_reason'].isin(exit_reason_list))] - return df -def _print_results(analysed_trades, stratname, analysis_groups, - enter_reason_list, exit_reason_list, - indicator_list, columns=None, - date_start=None, date_end=None): - if columns is None: - columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason'] - - bigdf = pd.DataFrame() +def prepare_results(analysed_trades, stratname, + enter_reason_list, exit_reason_list, + date_start=None, date_end=None): + res_df = pd.DataFrame() for pair, trades in analysed_trades[stratname].items(): - bigdf = pd.concat([bigdf, trades], ignore_index=True) + res_df = pd.concat([res_df, trades], ignore_index=True) - bigdf = _select_rows_within_dates(bigdf, date_start, date_end) + res_df = _select_rows_within_dates(res_df, date_start, date_end) - if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns): + if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns): + res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list) + + return res_df + + +def print_results(res_df, analysis_groups, indicator_list): + if res_df.shape[0] > 0: if analysis_groups: - _do_group_table_output(bigdf, analysis_groups) - - bigdf = _select_rows_by_entry_exit_tags(bigdf, enter_reason_list, exit_reason_list) + _do_group_table_output(res_df, analysis_groups) if "all" in indicator_list: - print(bigdf) + print(res_df) elif indicator_list is not None: available_inds = [] for ind in indicator_list: - if ind in bigdf: + if ind in res_df: available_inds.append(ind) ilist = ["pair", "enter_reason", "exit_reason"] + available_inds - _print_table(bigdf[ilist], sortcols=['exit_reason'], show_index=False) + _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False) else: - print("\\_ No trades to show") + print("\\No trades to show") def _print_table(df, sortcols=None, show_index=False): @@ -220,27 +230,34 @@ def _print_table(df, sortcols=None, show_index=False): ) -def process_entry_exit_reasons(backtest_dir: Path, - pairlist: List[str], - analysis_groups: Optional[List[str]] = ["0", "1", "2"], - enter_reason_list: Optional[List[str]] = ["all"], - exit_reason_list: Optional[List[str]] = ["all"], - indicator_list: Optional[List[str]] = []): +def process_entry_exit_reasons(config: Config): try: - backtest_stats = load_backtest_stats(backtest_dir) + analysis_groups = config.get('analysis_groups', []) + enter_reason_list = config.get('enter_reason_list', ["all"]) + exit_reason_list = config.get('exit_reason_list', ["all"]) + indicator_list = config.get('indicator_list', []) + analysis_date_start = config.get('analysis_date_start', None) + analysis_date_end = config.get('analysis_date_end', None) + + backtest_stats = load_backtest_stats(config['exportfilename']) + for strategy_name, results in backtest_stats['strategy'].items(): - trades = load_backtest_data(backtest_dir, strategy_name) + trades = load_backtest_data(config['exportfilename'], strategy_name) if not trades.empty: - 
signal_candles = _load_signal_candles(backtest_dir) - analysed_trades_dict = _process_candles_and_indicators(pairlist, strategy_name, - trades, signal_candles) - _print_results(analysed_trades_dict, - strategy_name, - analysis_groups, - enter_reason_list, - exit_reason_list, - indicator_list) + signal_candles = _load_signal_candles(config['exportfilename']) + analysed_trades_dict = _process_candles_and_indicators( + config['exchange']['pair_whitelist'], strategy_name, + trades, signal_candles) + + res_df = prepare_results(analysed_trades_dict, strategy_name, + enter_reason_list, exit_reason_list, + date_start=analysis_date_start, + date_end=analysis_date_end) + + print_results(res_df, + analysis_groups, + indicator_list) except ValueError as e: raise OperationalException(e) from e diff --git a/tests/data/test_entryexitanalysis.py b/tests/data/test_entryexitanalysis.py index 588220465..8daca1a67 100755 --- a/tests/data/test_entryexitanalysis.py +++ b/tests/data/test_entryexitanalysis.py @@ -189,3 +189,12 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp assert '0.5' in captured.out assert '1' in captured.out assert '2.5' in captured.out + + # test date filtering + args = get_args(base_args + + ['--analysis-date-start', "20180129", + '--analysis-date-end', "20180130"] + ) + start_analysis_entries_exits(args) + captured = capsys.readouterr() + assert 'enter_tag_long_b' not in captured.out From 51d21b413da418444d54a906f492a6a0999fef7f Mon Sep 17 00:00:00 2001 From: stm <37817561+initrv@users.noreply.github.com> Date: Sat, 26 Nov 2022 23:35:20 +0300 Subject: [PATCH 204/421] Fix 4ac update_total_profit _update_total_profit() must be executed before "self._position = Positions.Neutral" because _update_total_profit() calls get_unrealized_profit(), which returns 0 if position is neutral and total_profit is not updated --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 1a235801c..df4e79bea 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -73,8 +73,8 @@ class Base4ActionRLEnv(BaseEnvironment): trade_type = "short" self._last_trade_tick = self._current_tick elif action == Actions.Exit.value: - self._position = Positions.Neutral self._update_total_profit() + self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None else: From 1a3f88c7b93e10b65869cef1adb6e023f650d916 Mon Sep 17 00:00:00 2001 From: froggleston Date: Sun, 27 Nov 2022 11:30:13 +0000 Subject: [PATCH 205/421] Replace separate start/end date option with usual timerange option --- docs/advanced-backtesting.md | 7 ++--- freqtrade/commands/arguments.py | 3 +- freqtrade/commands/cli_options.py | 10 ------- freqtrade/configuration/configuration.py | 7 ++--- freqtrade/data/entryexitanalysis.py | 36 +++++++++--------------- tests/data/test_entryexitanalysis.py | 6 ++-- 6 files changed, 21 insertions(+), 48 deletions(-) diff --git a/docs/advanced-backtesting.md b/docs/advanced-backtesting.md index 78e692f84..ae3eb2e4e 100644 --- a/docs/advanced-backtesting.md +++ b/docs/advanced-backtesting.md @@ -103,15 +103,14 @@ output. 
### Filtering the trade output by date -To show only trades between dates within your backtested timerange, supply the following option(s) in YYYYMMDD format: +To show only trades between dates within your backtested timerange, supply the usual `timerange` option in `YYYYMMDD-[YYYYMMDD]` format: ``` ---analysis-date-start : Start date to filter output trades, inclusive. e.g. 20220101 ---analysis-date-end : End date to filter output trades, exclusive. e.g. 20220131 +--timerange : Timerange to filter output trades, start date inclusive, end date exclusive. e.g. 20220101-20221231 ``` For example, if your backtest timerange was `20220101-20221231` but you only want to output trades in January: ```bash -freqtrade backtesting-analysis -c --analysis-date-start 20220101 --analysis-date-end 20220201 +freqtrade backtesting-analysis -c --timerange 20220101-20220201 ``` diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py index 159b18439..b53a1022d 100644 --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -106,8 +106,7 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop "disableparamexport", "backtest_breakdown"] ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list", - "exit_reason_list", "indicator_list", - "analysis_date_start", "analysis_date_end"] + "exit_reason_list", "indicator_list", "timerange"] NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes", "list-markets", "list-pairs", "list-strategies", "list-freqaimodels", diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index 0592b0e53..91ac16365 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -658,16 +658,6 @@ AVAILABLE_CLI_OPTIONS = { nargs='+', default=[], ), - "analysis_date_start": Arg( - "--analysis-date-start", - help=("Start date to filter trades for analysis (inclusive). " - "e.g. '20220101'"), - ), - "analysis_date_end": Arg( - "--analysis-date-end", - help=("End date to filter trades for analysis (exclusive). " - "e.g. 
'20220131'"), - ), "freqaimodel": Arg( '--freqaimodel', help='Specify a custom freqaimodels.', diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py index 4e8abf48e..664610f33 100644 --- a/freqtrade/configuration/configuration.py +++ b/freqtrade/configuration/configuration.py @@ -462,11 +462,8 @@ class Configuration: self._args_to_config(config, argname='indicator_list', logstring='Analysis indicator list: {}') - self._args_to_config(config, argname='analysis_date_start', - logstring='Analysis filter start date: {}') - - self._args_to_config(config, argname='analysis_date_end', - logstring='Analysis filter end date: {}') + self._args_to_config(config, argname='timerange', + logstring='Filter trades by timerange: {}') def _process_runmode(self, config: Config) -> None: diff --git a/freqtrade/data/entryexitanalysis.py b/freqtrade/data/entryexitanalysis.py index 77f14d0c6..565a279b1 100755 --- a/freqtrade/data/entryexitanalysis.py +++ b/freqtrade/data/entryexitanalysis.py @@ -1,11 +1,11 @@ import logging -from datetime import datetime from pathlib import Path import joblib import pandas as pd from tabulate import tabulate +from freqtrade.configuration import TimeRange from freqtrade.constants import Config from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data, load_backtest_stats) @@ -153,22 +153,12 @@ def _do_group_table_output(bigdf, glist): logger.warning("Invalid group mask specified.") -def _select_rows_within_dates(df, date_start=None, date_end=None): - dtfmt = "%Y%m%d" - try: - bool(datetime.strptime(date_start, dtfmt)) - bool(datetime.strptime(date_end, dtfmt)) - except ValueError: - logger.error("Invalid start and/or end date provided. Use YYYYMMDD.") - return None - except TypeError: - return df - - if (date_start is not None): - df = df.loc[(df['date'] >= date_start)] - - if (date_end is not None): - df = df.loc[(df['date'] < date_end)] +def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'): + if timerange: + if timerange.starttype == 'date': + df = df.loc[(df[df_date_col] >= timerange.startdt)] + if timerange.stoptype == 'date': + df = df.loc[(df[df_date_col] < timerange.stopdt)] return df @@ -183,12 +173,12 @@ def _select_rows_by_tags(df, enter_reason_list, exit_reason_list): def prepare_results(analysed_trades, stratname, enter_reason_list, exit_reason_list, - date_start=None, date_end=None): + timerange=None): res_df = pd.DataFrame() for pair, trades in analysed_trades[stratname].items(): res_df = pd.concat([res_df, trades], ignore_index=True) - res_df = _select_rows_within_dates(res_df, date_start, date_end) + res_df = _select_rows_within_dates(res_df, timerange) if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns): res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list) @@ -236,8 +226,9 @@ def process_entry_exit_reasons(config: Config): enter_reason_list = config.get('enter_reason_list', ["all"]) exit_reason_list = config.get('exit_reason_list', ["all"]) indicator_list = config.get('indicator_list', []) - analysis_date_start = config.get('analysis_date_start', None) - analysis_date_end = config.get('analysis_date_end', None) + + timerange = TimeRange.parse_timerange(None if config.get( + 'timerange') is None else str(config.get('timerange'))) backtest_stats = load_backtest_stats(config['exportfilename']) @@ -252,8 +243,7 @@ def process_entry_exit_reasons(config: Config): res_df = prepare_results(analysed_trades_dict, 
strategy_name, enter_reason_list, exit_reason_list, - date_start=analysis_date_start, - date_end=analysis_date_end) + timerange=timerange) print_results(res_df, analysis_groups, diff --git a/tests/data/test_entryexitanalysis.py b/tests/data/test_entryexitanalysis.py index 8daca1a67..e33ed4955 100755 --- a/tests/data/test_entryexitanalysis.py +++ b/tests/data/test_entryexitanalysis.py @@ -191,10 +191,8 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp assert '2.5' in captured.out # test date filtering - args = get_args(base_args + - ['--analysis-date-start', "20180129", - '--analysis-date-end', "20180130"] - ) + args = get_args(base_args + ['--timerange', "20180129-20180130"]) start_analysis_entries_exits(args) captured = capsys.readouterr() + assert 'enter_tag_long_a' in captured.out assert 'enter_tag_long_b' not in captured.out From e4a3efc7d4b0ce94a02407315e60e689a20af900 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 15:44:14 +0100 Subject: [PATCH 206/421] Don't use strategy.stoploss too often discovered in #7760 --- freqtrade/freqtradebot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index 34d18b3d8..f9cb28c28 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -1151,7 +1151,7 @@ class FreqtradeBot(LoggingMixin): stoploss = ( self.edge.stoploss(pair=trade.pair) if self.edge else - self.strategy.stoploss / trade.leverage + trade.stop_loss_pct / trade.leverage ) if trade.is_short: stop_price = trade.open_rate * (1 - stoploss) From cf000a4c0090a56c8983666d67f9abca9e463c74 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 16:08:54 +0100 Subject: [PATCH 207/421] Bump develop version to 2022.12-dev --- freqtrade/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py index ad80410ee..b44189cb0 100644 --- a/freqtrade/__init__.py +++ b/freqtrade/__init__.py @@ -1,5 +1,5 @@ """ Freqtrade bot """ -__version__ = '2022.11.dev' +__version__ = '2022.12.dev' if 'dev' in __version__: try: From fe00a651632e040860b70a80140c62487588199c Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 21:34:07 +0300 Subject: [PATCH 208/421] FIx custom reward link --- docs/freqai-reinforcement-learning.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 241ccc3e2..741a9bbb4 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,14 +1,14 @@ # Reinforcement Learning !!! Note "Installation size" - Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". Users who prefer docker should ensure they use the docker image appended with `_freqairl`. ## Background and terminology ### What is RL and why does FreqAI need it? -Reinforcement learning involves two important components, the *agent* and the training *environment*. 
During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. +Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-a-custom-reward-function)). The reward is used to train weights in a neural network. A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.* @@ -16,9 +16,9 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new ### The RL interface -With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward). +With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-a-custom-reward-function). -We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. +We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-a-custom-reward-function), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. 
But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. @@ -130,7 +130,7 @@ After users realize there are no labels to set, they will soon understand that t return df ``` -It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. +It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. ## Configuring the Reinforcement Learner @@ -170,21 +170,21 @@ As you begin to modify the strategy and the prediction model, you will quickly r class MyCoolRLModel(ReinforcementLearner): """ - User created RL prediction model. + User created RL prediction model. Save this file to `freqtrade/user_data/freqaimodels` then use it with: freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat - - Here the users can override any of the functions - available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this + + Here the users can override any of the functions + available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this is where the user overrides `MyRLEnv` (see below), to define custom `calculate_reward()` function, or to override any other parts of the environment. - + This class also allows users to override any other part of the IFreqaiModel tree. - For example, the user can override `def fit()` or `def train()` or `def predict()` + For example, the user can override `def fit()` or `def train()` or `def predict()` to take fine-tuned control over these processes. Another common override may be `def data_cleaning_predict()` where the user can @@ -253,7 +253,7 @@ FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5Action * the actions available in the `calculate_reward` * the actions consumed by the user strategy -Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. +Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. 
Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. !!! Note FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). From 5b5859238b45795852d06ef6bc7d82681b9dee6a Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 22:06:14 +0300 Subject: [PATCH 209/421] Fix typo --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 741a9bbb4..ae3f67ed1 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -24,7 +24,7 @@ The framework is built on stable_baselines3 (torch) and OpenAI gym for the base ### Important considerations -As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. +As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. ## Running Reinforcement Learning From a85602eb9c68f5bb36cbb2e2aae2c61ad2518bc1 Mon Sep 17 00:00:00 2001 From: Joe Schr <8218910+TheJoeSchr@users.noreply.github.com> Date: Tue, 22 Nov 2022 11:41:28 +0100 Subject: [PATCH 210/421] add "how to run tests" --- docs/developer.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/developer.md b/docs/developer.md index b4961ac77..94923b035 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -49,6 +49,13 @@ For more information about the [Remote container extension](https://code.visuals New code should be covered by basic unittests. Depending on the complexity of the feature, Reviewers may request more in-depth unittests. If necessary, the Freqtrade team can assist and give guidance with writing good tests (however please don't expect anyone to write the tests for you). +#### How to run tests + +Use `py.test` in root folder to run all available testcases and confirm your local environment is setup correctly + +!!! Note "develop branch" + This assumes that you have `stable` branch checked out. Other branches may be work in progress with tests not working yet. 
+ #### Checking log content in tests Freqtrade uses 2 main methods to check log content in tests, `log_has()` and `log_has_re()` (to check using regex, in case of dynamic log-messages). From 320535a227357366c3211175bfdae5d46c541680 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 20:06:10 +0100 Subject: [PATCH 211/421] improve tests doc wording --- docs/developer.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/developer.md b/docs/developer.md index 94923b035..ea2e36ce1 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -51,10 +51,10 @@ If necessary, the Freqtrade team can assist and give guidance with writing good #### How to run tests -Use `py.test` in root folder to run all available testcases and confirm your local environment is setup correctly +Use `pytest` in root folder to run all available testcases and confirm your local environment is setup correctly -!!! Note "develop branch" - This assumes that you have `stable` branch checked out. Other branches may be work in progress with tests not working yet. +!!! Note "feature branches" + Tests are expected to pass on the `develop` and `stable` branches. Other branches may be work in progress with tests not working yet. #### Checking log content in tests From a02da08065a305ed822c5795befff39afdbc97c3 Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 22:23:00 +0300 Subject: [PATCH 212/421] Fix typo --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index ae3f67ed1..226c02919 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead the user need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. 
The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: From 67d94692774eade7cb25c9ddb22ba81a5ce65ee0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 20:42:04 +0100 Subject: [PATCH 213/421] small wording fix --- docs/freqai-reinforcement-learning.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 226c02919..d690c7645 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -24,7 +24,7 @@ The framework is built on stable_baselines3 (torch) and OpenAI gym for the base ### Important considerations -As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. +As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL training environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks like `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. ## Running Reinforcement Learning @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the user need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead it is necessary to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. 
The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: From 64d4a52a5615ff9d5ddc2be693d8a79c002d0c9f Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Sun, 27 Nov 2022 20:43:50 +0100 Subject: [PATCH 214/421] Improve the RL learning process Improve the RL learning process by selecting random start point for the agent, it can help to block the agent to only learn on the selected period of time, while improving the quality of the model. --- freqtrade/freqai/RL/BaseEnvironment.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 3332e5a18..5d881ba32 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -9,6 +9,7 @@ import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame +import random from freqtrade.data.dataprovider import DataProvider @@ -121,6 +122,9 @@ class BaseEnvironment(gym.Env): self._done = False if self.starting_point is True: + length_of_data = int(self._end_tick/4) + start_tick = random.randint(self.window_size+1, length_of_data) + self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] From 25e041b98eabd62513d4c4494ed9e2b12100dd6e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 20:50:03 +0100 Subject: [PATCH 215/421] sneak in small change to FreqaiExampleHybridStrategy docstring and startup count --- freqtrade/templates/FreqaiExampleHybridStrategy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/templates/FreqaiExampleHybridStrategy.py b/freqtrade/templates/FreqaiExampleHybridStrategy.py index 26335956f..9d1842cd7 100644 --- a/freqtrade/templates/FreqaiExampleHybridStrategy.py +++ b/freqtrade/templates/FreqaiExampleHybridStrategy.py @@ -19,7 +19,7 @@ class FreqaiExampleHybridStrategy(IStrategy): Launching this strategy would be: - freqtrade trade --strategy FreqaiExampleHyridStrategy --strategy-path freqtrade/templates + freqtrade trade --strategy FreqaiExampleHybridStrategy --strategy-path freqtrade/templates --freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json or the user simply adds this to their config: @@ -86,7 +86,7 @@ class FreqaiExampleHybridStrategy(IStrategy): process_only_new_candles = True stoploss = -0.05 use_exit_signal = True - startup_candle_count: int = 300 + startup_candle_count: int = 30 can_short = True # Hyperoptable parameters From 7fd6bc526e38537a8595abcbe562af6ac6f53729 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 21:03:13 +0100 Subject: [PATCH 216/421] add randomize_starting_position to the rl_config --- docs/freqai-parameter-table.md | 1 + freqtrade/constants.py | 1 + freqtrade/freqai/RL/BaseEnvironment.py | 7 ++++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 02426ec13..f2a52a9b8 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -82,6 +82,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. | `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each. +| `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting.
**Datatype:** bool.
Default: `False`. ### Additional parameters diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 878c38929..d869b89f6 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -591,6 +591,7 @@ CONF_SCHEMA = { "model_type": {"type": "string", "default": "PPO"}, "policy_type": {"type": "string", "default": "MlpPolicy"}, "net_arch": {"type": "array", "default": [128, 128]}, + "randomize_startinng_position": {"type": "boolean", "default": False}, "model_reward_parameters": { "type": "object", "properties": { diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 5d881ba32..8f940dd1b 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -122,9 +122,10 @@ class BaseEnvironment(gym.Env): self._done = False if self.starting_point is True: - length_of_data = int(self._end_tick/4) - start_tick = random.randint(self.window_size+1, length_of_data) - self._start_tick = start_tick + if self.rl_config.get('randomize_starting_position', False): + length_of_data = int(self._end_tick / 4) + start_tick = random.randint(self.window_size + 1, length_of_data) + self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] From 56518def42fab1fd3d89f12bcda281a1eff11ef7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 21:06:01 +0100 Subject: [PATCH 217/421] isort --- freqtrade/freqai/RL/BaseEnvironment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 8f940dd1b..66bdb8435 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -1,4 +1,5 @@ import logging +import random from abc import abstractmethod from enum import Enum from typing import Optional @@ -9,7 +10,6 @@ import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame -import random from freqtrade.data.dataprovider import DataProvider From f21dbbd8bb54a42203db28d28b017036e5e62d65 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 28 Nov 2022 00:06:02 +0300 Subject: [PATCH 218/421] Update imports of custom model --- docs/freqai-reinforcement-learning.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index d690c7645..353d7a2cc 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -166,7 +166,8 @@ As you begin to modify the strategy and the prediction model, you will quickly r ```python from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner - from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv + from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions + class MyCoolRLModel(ReinforcementLearner): """ From 49e41925b01bfd4f66de4893afaa399f4347e829 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:42 +0000 Subject: [PATCH 219/421] Bump flake8 from 5.0.4 to 6.0.0 Bumps [flake8](https://github.com/pycqa/flake8) from 5.0.4 to 6.0.0. 
- [Release notes](https://github.com/pycqa/flake8/releases) - [Commits](https://github.com/pycqa/flake8/compare/5.0.4...6.0.0) --- updated-dependencies: - dependency-name: flake8 dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index b46c244b5..ffce3d696 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,7 +7,7 @@ -r docs/requirements-docs.txt coveralls==3.3.1 -flake8==5.0.4 +flake8==6.0.0 flake8-tidy-imports==4.8.0 mypy==0.991 pre-commit==2.20.0 From 7e75bc8fcf40e8f250e6c0cd082b87051c081d3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:48 +0000 Subject: [PATCH 220/421] Bump sb3-contrib from 1.6.1 to 1.6.2 Bumps [sb3-contrib](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib) from 1.6.1 to 1.6.2. - [Release notes](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib/releases) - [Commits](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib/compare/v1.6.1...v1.6.2) --- updated-dependencies: - dependency-name: sb3-contrib dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index b6bd7ef15..2a0a04455 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -5,4 +5,4 @@ torch==1.12.1 stable-baselines3==1.6.1 gym==0.21 -sb3-contrib==1.6.1 +sb3-contrib==1.6.2 From 5aec51a16c37d44993e36d5cacfd8c01d464a93b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:55 +0000 Subject: [PATCH 221/421] Bump urllib3 from 1.26.12 to 1.26.13 Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.12 to 1.26.13. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/1.26.13/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.12...1.26.13) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..881ae04ae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ python-telegram-bot==13.14 arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 -urllib3==1.26.12 +urllib3==1.26.13 jsonschema==4.17.0 TA-Lib==0.4.25 technical==1.3.0 From 924bbad199a0d65147d6208ae9e3d20136bbab9e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:58 +0000 Subject: [PATCH 222/421] Bump pyarrow from 10.0.0 to 10.0.1 Bumps [pyarrow](https://github.com/apache/arrow) from 10.0.0 to 10.0.1. - [Release notes](https://github.com/apache/arrow/releases) - [Commits](https://github.com/apache/arrow/compare/go/v10.0.0...go/v10.0.1) --- updated-dependencies: - dependency-name: pyarrow dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..cde6b0344 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,7 +22,7 @@ jinja2==3.1.2 tables==3.7.0 blosc==1.10.6 joblib==1.2.0 -pyarrow==10.0.0; platform_machine != 'armv7l' +pyarrow==10.0.1; platform_machine != 'armv7l' # find first, C search in arrays py_find_1st==1.1.5 From a46b09d400ec78d0b278c14212728df6b6c46345 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:01:01 +0000 Subject: [PATCH 223/421] Bump prompt-toolkit from 3.0.32 to 3.0.33 Bumps [prompt-toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit) from 3.0.32 to 3.0.33. - [Release notes](https://github.com/prompt-toolkit/python-prompt-toolkit/releases) - [Changelog](https://github.com/prompt-toolkit/python-prompt-toolkit/blob/master/CHANGELOG) - [Commits](https://github.com/prompt-toolkit/python-prompt-toolkit/compare/3.0.32...3.0.33) --- updated-dependencies: - dependency-name: prompt-toolkit dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..f598d2377 100644 --- a/requirements.txt +++ b/requirements.txt @@ -47,7 +47,7 @@ psutil==5.9.4 colorama==0.4.6 # Building config files interactively questionary==1.10.0 -prompt-toolkit==3.0.32 +prompt-toolkit==3.0.33 # Extensions to datetime library python-dateutil==2.8.2 From 348731598e633d23576f0b89f69423021c396c5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:01:25 +0000 Subject: [PATCH 224/421] Bump ccxt from 2.1.96 to 2.2.36 Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.1.96 to 2.2.36. - [Release notes](https://github.com/ccxt/ccxt/releases) - [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg) - [Commits](https://github.com/ccxt/ccxt/compare/2.1.96...2.2.36) --- updated-dependencies: - dependency-name: ccxt dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..2cb829c3d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.23.5 pandas==1.5.1 pandas-ta==0.3.14b -ccxt==2.1.96 +ccxt==2.2.36 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.3; platform_machine != 'armv7l' From 9c28cc810d4ee384773d02481226686b2cbc9715 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 05:33:45 +0000 Subject: [PATCH 225/421] Bump cryptography from 38.0.1 to 38.0.4 Bumps [cryptography](https://github.com/pyca/cryptography) from 38.0.1 to 38.0.4. - [Release notes](https://github.com/pyca/cryptography/releases) - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/38.0.1...38.0.4) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2cb829c3d..2e5293cf6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ pandas-ta==0.3.14b ccxt==2.2.36 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' -cryptography==38.0.3; platform_machine != 'armv7l' +cryptography==38.0.4; platform_machine != 'armv7l' aiohttp==3.8.3 SQLAlchemy==1.4.44 python-telegram-bot==13.14 From d73fd42769298721c3a2306540263d74c9172ed9 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 06:38:35 +0100 Subject: [PATCH 226/421] Fix flake8 error introduced with 6.0 update --- freqtrade/persistence/pairlock_middleware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/persistence/pairlock_middleware.py b/freqtrade/persistence/pairlock_middleware.py index ec57e91fc..69d8b098b 100644 --- a/freqtrade/persistence/pairlock_middleware.py +++ b/freqtrade/persistence/pairlock_middleware.py @@ -87,7 +87,7 @@ class PairLocks(): Get the lock that expires the latest for the pair given. """ locks = PairLocks.get_pair_locks(pair, now, side=side) - locks = sorted(locks, key=lambda l: l.lock_end_time, reverse=True) + locks = sorted(locks, key=lambda lock: lock.lock_end_time, reverse=True) return locks[0] if locks else None @staticmethod From dc03317cc89c4a75359c866bf9673e9305bde0f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 07:02:54 +0000 Subject: [PATCH 227/421] Bump jsonschema from 4.17.0 to 4.17.1 Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.17.0 to 4.17.1. - [Release notes](https://github.com/python-jsonschema/jsonschema/releases) - [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst) - [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.17.0...v4.17.1) --- updated-dependencies: - dependency-name: jsonschema dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cc38bfc96..9ae85ac89 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 urllib3==1.26.13 -jsonschema==4.17.0 +jsonschema==4.17.1 TA-Lib==0.4.25 technical==1.3.0 tabulate==0.9.0 From 9880e9ab600832f6479bedeafbbce267ba92c6e3 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Mon, 28 Nov 2022 17:10:17 +0900 Subject: [PATCH 228/421] Fix typo in strategy_analysis_example.md seperate -> separate --- docs/strategy_analysis_example.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/strategy_analysis_example.md b/docs/strategy_analysis_example.md index 1526ea038..bae4a9108 100644 --- a/docs/strategy_analysis_example.md +++ b/docs/strategy_analysis_example.md @@ -232,7 +232,7 @@ graph = generate_candlestick_graph(pair=pair, # Show graph inline # graph.show() -# Render graph in a seperate window +# Render graph in a separate window graph.show(renderer="browser") ``` From 05a7fca2424c2c10b85f4d5e44f8ba5fa26fdb4c Mon Sep 17 00:00:00 2001 From: Robert Davey Date: Mon, 28 Nov 2022 12:12:45 +0000 Subject: [PATCH 229/421] Fix utils docs for backtesting-analysis --- docs/utils.md | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/docs/utils.md b/docs/utils.md index e88a13a9a..e717a0f9c 100644 --- a/docs/utils.md +++ b/docs/utils.md @@ -722,8 +722,7 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V] [--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]] [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]] [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]] - [--analysis-date-start YYYYMMDD] - [--analysis-date-end YYYYMMDD] + [--timerange YYYYMMDD-[YYYYMMDD]] optional arguments: -h, --help show this help message and exit @@ -746,12 +745,10 @@ optional arguments: --indicator-list INDICATOR_LIST [INDICATOR_LIST ...] Comma separated list of indicators to analyse. e.g. 'close,rsi,bb_lowerband,profit_abs' - --analysis-date-start YYYYMMDD - Start date to filter trades for analysis (inclusive). e.g. - 20220101 - --analysis-date-end YYYYMMDD - End date to filter trades for analysis (exclusive). e.g. - 20220131 + --timerange YYYYMMDD-[YYYYMMDD] + Timerange to filter trades for analysis, + start inclusive, end exclusive. e.g. + 20220101-20220201 Common arguments: -v, --verbose Verbose mode (-vv for more, -vvv to get all messages). 
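Since the analysis filter now reuses the standard `--timerange` option, the accepted `YYYYMMDD-[YYYYMMDD]` value is parsed the same way as elsewhere in the bot. A small illustrative snippet — not part of any commit in this series — showing how such a value maps to the inclusive-start / exclusive-end bounds consumed by `_select_rows_within_dates()` earlier in the series:

```python
# Illustrative only: how a --timerange value is interpreted for trade filtering.
from freqtrade.configuration import TimeRange

timerange = TimeRange.parse_timerange("20220101-20220201")

if timerange.starttype == 'date':
    print(f"start (inclusive): {timerange.startdt}")
if timerange.stoptype == 'date':
    print(f"end (exclusive):   {timerange.stopdt}")
```

Leaving either side of the dash empty skips that bound, which is why the end date is optional in the documented `YYYYMMDD-[YYYYMMDD]` format.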
From 9cbfa1201113afeb143fb22b3b9ee4be125c5263 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 28 Nov 2022 16:02:17 +0300 Subject: [PATCH 230/421] Directly set model_type in base RL model --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 1 + freqtrade/freqai/data_drawer.py | 7 +------ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 709ded048..e1381ab62 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -64,6 +64,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() self.net_arch = self.rl_config.get('net_arch', [128, 128]) + self.dd.model_type = "stable_baselines" def unset_outlier_removal(self): """ diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 3b9352efe..ab41240e9 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -99,12 +99,7 @@ class FreqaiDataDrawer: self.empty_pair_dict: pair_info = { "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} - if 'Reinforcement' in self.config['freqaimodel']: - self.model_type = 'stable_baselines' - logger.warning('User passed a ReinforcementLearner model, FreqAI will ' - 'now use stable_baselines3 to save models.') - else: - self.model_type = self.freqai_info.get('model_save_type', 'joblib') + self.model_type = self.freqai_info.get('model_save_type', 'joblib') def update_metric_tracker(self, metric: str, value: float, pair: str) -> None: """ From e891c41760e38b41d57639933c1f986b6d8abcc3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 18:20:30 +0100 Subject: [PATCH 231/421] Fix typo in ipynb, too. --- freqtrade/templates/strategy_analysis_example.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/templates/strategy_analysis_example.ipynb b/freqtrade/templates/strategy_analysis_example.ipynb index 77444a023..5fb14ab2f 100644 --- a/freqtrade/templates/strategy_analysis_example.ipynb +++ b/freqtrade/templates/strategy_analysis_example.ipynb @@ -328,7 +328,7 @@ "# Show graph inline\n", "# graph.show()\n", "\n", - "# Render graph in a seperate window\n", + "# Render graph in a separate window\n", "graph.show(renderer=\"browser\")\n" ] }, From 8efa8bc78a445067637f51cbd952b2e55552831a Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 19:35:17 +0100 Subject: [PATCH 232/421] Update stable-baselines3 to 1.6.2 --- requirements-freqai-rl.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index 2a0a04455..df541c701 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -3,6 +3,7 @@ # Required for freqai-rl torch==1.12.1 -stable-baselines3==1.6.1 -gym==0.21 +stable-baselines3==1.6.2 sb3-contrib==1.6.2 +# Gym is forced to this version by stable-baselines3. 
+gym==0.21 From 2c75b5e027d137b55904d78aeadc0063291b876a Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 13:26:27 +0000 Subject: [PATCH 233/421] Extract "live" test from regular tests --- .github/workflows/ci.yml | 55 +++++++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0d5a7540d..334f7bec3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,12 +66,6 @@ jobs: - name: Tests run: | pytest --random-order --cov=freqtrade --cov-config=.coveragerc - if: matrix.python-version != '3.9' || matrix.os != 'ubuntu-22.04' - - - name: Tests incl. ccxt compatibility tests - run: | - pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun - if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-22.04' - name: Coveralls if: (runner.os == 'Linux' && matrix.python-version == '3.10' && matrix.os == 'ubuntu-22.04') @@ -310,9 +304,56 @@ jobs: details: Freqtrade doc test failed! webhookUrl: ${{ secrets.DISCORD_WEBHOOK }} + + build_linux_online: + # Run pytest with "live" checks + runs-on: ubuntu-22.04 + # permissions: + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.9" + + - name: Cache_dependencies + uses: actions/cache@v3 + id: cache + with: + path: ~/dependencies/ + key: ${{ runner.os }}-dependencies + + - name: pip cache (linux) + uses: actions/cache@v3 + if: runner.os == 'Linux' + with: + path: ~/.cache/pip + key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip + + - name: TA binary *nix + if: steps.cache.outputs.cache-hit != 'true' + run: | + cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd .. + + - name: Installation - *nix + if: runner.os == 'Linux' + run: | + python -m pip install --upgrade pip wheel + export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH + export TA_LIBRARY_PATH=${HOME}/dependencies/lib + export TA_INCLUDE_PATH=${HOME}/dependencies/include + pip install -r requirements-dev.txt + pip install -e . + + - name: Tests incl. 
ccxt compatibility tests + run: | + pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun + + # Notify only once - when CI completes (and after deploy) in case it's successfull notify-complete: - needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ] + needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit, build_linux_online ] runs-on: ubuntu-22.04 # Discord notification can't handle schedule events if: (github.event_name != 'schedule') From 5500c10f7853eeb09c08c59490807f2fb9df217f Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 19:40:43 +0100 Subject: [PATCH 234/421] Improve CI file layout --- .github/workflows/ci.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 334f7bec3..e730d1489 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -353,7 +353,15 @@ jobs: # Notify only once - when CI completes (and after deploy) in case it's successfull notify-complete: - needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit, build_linux_online ] + needs: [ + build_linux, + build_macos, + build_windows, + docs_check, + mypy_version_check, + pre-commit, + build_linux_online + ] runs-on: ubuntu-22.04 # Discord notification can't handle schedule events if: (github.event_name != 'schedule') From b87545cd1256faf439313c47dad0bed89267fb5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Nov 2022 07:46:38 +0000 Subject: [PATCH 235/421] Bump torch from 1.12.1 to 1.13.0 Bumps [torch](https://github.com/pytorch/pytorch) from 1.12.1 to 1.13.0. - [Release notes](https://github.com/pytorch/pytorch/releases) - [Changelog](https://github.com/pytorch/pytorch/blob/master/RELEASE.md) - [Commits](https://github.com/pytorch/pytorch/compare/v1.12.1...v1.13.0) --- updated-dependencies: - dependency-name: torch dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index df541c701..67bd66102 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -2,7 +2,7 @@ -r requirements-freqai.txt # Required for freqai-rl -torch==1.12.1 +torch==1.13.0 stable-baselines3==1.6.2 sb3-contrib==1.6.2 # Gym is forced to this version by stable-baselines3. 
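
After installing requirements-freqai-rl.txt with the pins above, a quick sanity check is to print the resolved package versions. This is only an illustrative sketch and not part of any patch in this series; the expected values simply mirror the pins from the two requirements changes above.

    # Illustrative sanity check for the RL dependency pins (assumed snippet, not from the patches).
    import gym
    import torch
    import stable_baselines3
    import sb3_contrib

    print(torch.__version__)              # expected 1.13.0 after the torch bump
    print(stable_baselines3.__version__)  # expected 1.6.2
    print(sb3_contrib.__version__)        # expected 1.6.2
    print(gym.__version__)                # expected 0.21.x, forced by stable-baselines3
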
From 8ea58ab35243cd238a989faabc429160b180cb52 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Tue, 29 Nov 2022 10:38:35 -0300 Subject: [PATCH 236/421] change BT prediction files to feather format --- freqtrade/freqai/data_kitchen.py | 40 ++++++++++------------------ freqtrade/freqai/freqai_interface.py | 1 - 2 files changed, 14 insertions(+), 27 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index d438aaede..1c4177381 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1317,41 +1317,24 @@ class FreqaiDataKitchen: self, append_df: DataFrame ) -> None: """ - Save prediction dataframe from backtesting to h5 file format + Save prediction dataframe from backtesting to feather file format :param append_df: dataframe for backtesting period """ full_predictions_folder = Path(self.full_path / self.backtest_predictions_folder) if not full_predictions_folder.is_dir(): full_predictions_folder.mkdir(parents=True, exist_ok=True) - append_df.to_hdf(self.backtesting_results_path, key=self.model_filename) + append_df.to_feather(self.backtesting_results_path) def get_backtesting_prediction( self ) -> DataFrame: """ - Get prediction dataframe from h5 file format + Get prediction dataframe from feather file format """ - append_df = self.backtesting_h5_data[self.model_filename] + append_df = pd.read_feather(self.backtesting_results_path) return append_df - def load_prediction_pair_file( - self - ) -> None: - """ - Load prediction file if it exists - """ - pair_file_name = self.pair.split(':')[0].replace('/', '_').lower() - path_to_predictionfile = Path(self.full_path / - self.backtest_predictions_folder / - f"{pair_file_name}_prediction.h5") - self.backtesting_results_path = path_to_predictionfile - file_exists = path_to_predictionfile.is_file() - if file_exists: - self.backtesting_h5_data = pd.HDFStore(path_to_predictionfile) - else: - self.backtesting_h5_data = {} - def check_if_backtest_prediction_is_valid( self, len_backtest_df: int @@ -1363,11 +1346,17 @@ class FreqaiDataKitchen: :return: :boolean: whether the prediction file is valid. """ - if self.model_filename in self.backtesting_h5_data: + path_to_predictionfile = Path(self.full_path / + self.backtest_predictions_folder / + f"{self.model_filename}_prediction.feather") + self.backtesting_results_path = path_to_predictionfile + + file_exists = path_to_predictionfile.is_file() + + if file_exists: append_df = self.get_backtesting_prediction() if len(append_df) == len_backtest_df and 'date' in append_df: - logger.info("Found backtesting prediction file " - f"at {self.backtesting_results_path.name}") + logger.info(f"Found backtesting prediction file at {path_to_predictionfile}") return True else: logger.info("A new backtesting prediction file is required. " @@ -1376,8 +1365,7 @@ class FreqaiDataKitchen: return False else: logger.info( - "Could not find backtesting prediction file " - f"at {self.backtesting_results_path.name}" + f"Could not find backtesting prediction file at {path_to_predictionfile}" ) return False diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index b2f931760..129571d4a 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -275,7 +275,6 @@ class IFreqaiModel(ABC): self.pair_it += 1 train_it = 0 - dk.load_prediction_pair_file() # Loop enforcing the sliding window training/backtesting paradigm # tr_train is the training time range e.g. 
1 historical month # tr_backtest is the backtesting time range e.g. the week directly From 3c322bf7dfab2910d223a35d9f04f50799d1e651 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 29 Nov 2022 18:27:08 +0100 Subject: [PATCH 237/421] Improve forceenter validation messages --- freqtrade/rpc/rpc.py | 33 +++++++++++++++++++-------------- tests/rpc/test_rpc.py | 4 ++++ 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 011543a09..334e18dc7 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -740,6 +740,24 @@ class RPC: self._freqtrade.wallets.update() return {'result': f'Created sell order for trade {trade_id}.'} + def _force_entry_validations(self, pair: str, order_side: SignalDirection): + if not self._freqtrade.config.get('force_entry_enable', False): + raise RPCException('Force_entry not enabled.') + + if self._freqtrade.state != State.RUNNING: + raise RPCException('trader is not running') + + if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT: + raise RPCException("Can't go short on Spot markets.") + + if pair not in self._freqtrade.exchange.get_markets(tradable_only=True): + raise RPCException('Symbol does not exist or market is not active.') + # Check if pair quote currency equals to the stake currency. + stake_currency = self._freqtrade.config.get('stake_currency') + if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency: + raise RPCException( + f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.') + def _rpc_force_entry(self, pair: str, price: Optional[float], *, order_type: Optional[str] = None, order_side: SignalDirection = SignalDirection.LONG, @@ -750,21 +768,8 @@ class RPC: Handler for forcebuy Buys a pair trade at the given or current price """ + self._force_entry_validations(pair, order_side) - if not self._freqtrade.config.get('force_entry_enable', False): - raise RPCException('Force_entry not enabled.') - - if self._freqtrade.state != State.RUNNING: - raise RPCException('trader is not running') - - if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT: - raise RPCException("Can't go short on Spot markets.") - - # Check if pair quote currency equals to the stake currency. - stake_currency = self._freqtrade.config.get('stake_currency') - if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency: - raise RPCException( - f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.') # check if valid pair # check if pair already has an open pair diff --git a/tests/rpc/test_rpc.py b/tests/rpc/test_rpc.py index 8828b6f33..24b5f1cbe 100644 --- a/tests/rpc/test_rpc.py +++ b/tests/rpc/test_rpc.py @@ -1056,6 +1056,10 @@ def test_rpc_force_entry(mocker, default_conf, ticker, fee, limit_buy_order_open assert trade.pair == pair assert trade.open_rate == 0.0001 + with pytest.raises(RPCException, + match=r'Symbol does not exist or market is not active.'): + rpc._rpc_force_entry('LTC/NOTHING', 0.0001) + # Test buy pair not with stakes with pytest.raises(RPCException, match=r'Wrong pair selected. 
Only pairs with stake-currency.*'): From 4571aedb33bac90dcb7f669bfd4c707f1c760173 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 30 Nov 2022 00:53:35 +0100 Subject: [PATCH 238/421] consolidate and clean code --- docs/freqai-running.md | 4 +- freqtrade/freqai/data_kitchen.py | 23 ----------- freqtrade/freqai/freqai_interface.py | 59 +++++++++++++--------------- 3 files changed, 29 insertions(+), 57 deletions(-) diff --git a/docs/freqai-running.md b/docs/freqai-running.md index 23873547f..b046e7bb8 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -79,11 +79,11 @@ To change your **features**, you **must** set a new `identifier` in the config t To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config. -### Backtest live models +### Backtest live collected predictions FreqAI allow you to reuse live historic predictions through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. -The `--timerange` parameter must not be informed, as it will be automatically calculated through the data in historic predictions file. +The `--timerange` parameter must not be informed, as it will be automatically calculated through the data in the historic predictions file. ### Downloading data to cover the full backtest period diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 1c4177381..3201fc451 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -75,7 +75,6 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() - self.backtesting_h5_data: HDFStore = {} self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -456,28 +455,6 @@ class FreqaiDataKitchen: # print(tr_training_list, tr_backtesting_list) return tr_training_list_timerange, tr_backtesting_list_timerange - # def split_timerange_live_models( - # self - # ) -> Tuple[list, list]: - - # tr_backtesting_list_timerange = [] - # asset = self.pair.split("/")[0] - # if asset not in self.backtest_live_models_data["assets_end_dates"]: - # raise OperationalException( - # f"Model not available for pair {self.pair}. " - # "Please, try again after removing this pair from the configuration file." 
- # ) - # asset_data = self.backtest_live_models_data["assets_end_dates"][asset] - # backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"] - # model_end_dates = [x for x in asset_data] - # model_end_dates.append(backtesting_timerange.stopts) - # model_end_dates.sort() - # for index, item in enumerate(model_end_dates): - # if len(model_end_dates) > (index + 1): - # tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1]) - # tr_backtesting_list_timerange.append(tr_to_add) - - # return tr_backtesting_list_timerange, tr_backtesting_list_timerange def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame: """ diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 129571d4a..cf7c4151b 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -55,7 +55,6 @@ class IFreqaiModel(ABC): def __init__(self, config: Config) -> None: self.config = config - self.metadata: Dict[str, Any] = {} self.assert_config(self.config) self.freqai_info: Dict[str, Any] = config["freqai"] self.data_split_parameters: Dict[str, Any] = config.get("freqai", {}).get( @@ -102,7 +101,7 @@ class IFreqaiModel(ABC): self.get_corr_dataframes: bool = True self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() - self.metadata = self.dd.load_global_metadata_from_disk() + self.metadata: Dict[str, Any] = self.dd.load_global_metadata_from_disk() self.data_provider: Optional[DataProvider] = None self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) @@ -148,18 +147,13 @@ class IFreqaiModel(ABC): # the concatenated results for the full backtesting period back to the strategy. elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) - if self.dk.backtest_live_models: - logger.info( - "Backtesting using historic predictions (live models)") - else: - logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") - dataframe = self.dk.use_strategy_to_populate_indicators( - strategy, prediction_dataframe=dataframe, pair=metadata["pair"] - ) if not self.config.get("freqai_backtest_live_models", False): + logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dk = self.start_backtesting(dataframe, metadata, self.dk) dataframe = dk.remove_features_from_df(dk.return_dataframe) else: + logger.info( + "Backtesting using historic predictions (live models)") dk = self.start_backtesting_from_historic_predictions( dataframe, metadata, self.dk) dataframe = dk.return_dataframe @@ -167,7 +161,6 @@ class IFreqaiModel(ABC): self.clean_up() if self.live: self.inference_timer('stop', metadata["pair"]) - self.set_start_dry_live_date(dataframe) return dataframe @@ -336,27 +329,6 @@ class IFreqaiModel(ABC): return dk - def start_backtesting_from_historic_predictions( - self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen - ) -> FreqaiDataKitchen: - """ - :param dataframe: DataFrame = strategy passed dataframe - :param metadata: Dict = pair metadata - :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only - :return: - FreqaiDataKitchen = Data management/analysis tool associated to present pair only - """ - pair = metadata["pair"] - dk.return_dataframe = dataframe - saved_dataframe = self.dd.historic_predictions[pair] - columns_to_drop = list(set(saved_dataframe.columns).intersection( - dk.return_dataframe.columns)) - dk.return_dataframe = 
dk.return_dataframe.drop(columns=list(columns_to_drop)) - dk.return_dataframe = pd.merge( - dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred") - # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) - return dk - def start_live( self, dataframe: DataFrame, metadata: dict, strategy: IStrategy, dk: FreqaiDataKitchen ) -> FreqaiDataKitchen: @@ -665,6 +637,8 @@ class IFreqaiModel(ABC): self.dd.historic_predictions[pair] = pred_df hist_preds_df = self.dd.historic_predictions[pair] + self.set_start_dry_live_date(pred_df) + for label in hist_preds_df.columns: if hist_preds_df[label].dtype == object: continue @@ -913,6 +887,27 @@ class IFreqaiModel(ABC): pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp()) self.update_metadata(metadata) + def start_backtesting_from_historic_predictions( + self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen + ) -> FreqaiDataKitchen: + """ + :param dataframe: DataFrame = strategy passed dataframe + :param metadata: Dict = pair metadata + :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only + :return: + FreqaiDataKitchen = Data management/analysis tool associated to present pair only + """ + pair = metadata["pair"] + dk.return_dataframe = dataframe + saved_dataframe = self.dd.historic_predictions[pair] + columns_to_drop = list(set(saved_dataframe.columns).intersection( + dk.return_dataframe.columns)) + dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop)) + dk.return_dataframe = pd.merge( + dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred") + # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) + return dk + # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. From 10a45474e87ac9943593bee1b6b6afe0fd434616 Mon Sep 17 00:00:00 2001 From: rzhb <3757123+rzhb@users.noreply.github.com> Date: Wed, 30 Nov 2022 12:28:21 +0800 Subject: [PATCH 239/421] Update data-analysis.md fix typo in code --- docs/data-analysis.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data-analysis.md b/docs/data-analysis.md index 926ed3eae..7a6c6bb96 100644 --- a/docs/data-analysis.md +++ b/docs/data-analysis.md @@ -83,7 +83,7 @@ from pathlib import Path project_root = "somedir/freqtrade" i=0 try: - os.chdirdir(project_root) + os.chdir(project_root) assert Path('LICENSE').is_file() except: while i<4 and (not Path('LICENSE').is_file()): From 17cf3c7e837123620988908a085d190d9afa9b54 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Wed, 30 Nov 2022 08:28:45 -0300 Subject: [PATCH 240/421] bug fixes and removed fillna from fit_live_predictions --- freqtrade/freqai/freqai_interface.py | 8 ++++++-- tests/freqai/test_freqai_interface.py | 3 ++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index cf7c4151b..3386d2881 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -147,6 +147,9 @@ class IFreqaiModel(ABC): # the concatenated results for the full backtesting period back to the strategy. 
elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) + dataframe = self.dk.use_strategy_to_populate_indicators( + strategy, prediction_dataframe=dataframe, pair=metadata["pair"] + ) if not self.config.get("freqai_backtest_live_models", False): logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dk = self.start_backtesting(dataframe, metadata, self.dk) @@ -637,7 +640,7 @@ class IFreqaiModel(ABC): self.dd.historic_predictions[pair] = pred_df hist_preds_df = self.dd.historic_predictions[pair] - self.set_start_dry_live_date(pred_df) + self.set_start_dry_live_date(strat_df) for label in hist_preds_df.columns: if hist_preds_df[label].dtype == object: @@ -680,7 +683,7 @@ class IFreqaiModel(ABC): if self.dd.historic_predictions[dk.pair][label].dtype == object: continue f = spy.stats.norm.fit( - self.dd.historic_predictions[dk.pair][label].fillna(0).tail(num_candles)) + self.dd.historic_predictions[dk.pair][label].tail(num_candles)) dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1] return @@ -844,6 +847,7 @@ class IFreqaiModel(ABC): """ fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) if fit_live_predictions_candles: + logger.info("Applying fit_live_predictions in backtesting") label_columns = [col for col in dk.full_df.columns if ( col.startswith("&") and not (col.startswith("&") and col.endswith("_mean")) and diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 605485e12..c53137093 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -363,7 +363,8 @@ def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") freqai.dk.pair = "ADA/BTC" - freqai.dk.full_df = df + freqai.dk.full_df = df.fillna(0) + freqai.dk.full_df assert "&-s_close_mean" not in freqai.dk.full_df.columns assert "&-s_close_std" not in freqai.dk.full_df.columns freqai.backtesting_fit_live_predictions(freqai.dk) From e7f72d52b8faddfc35ed27b1840aa6a2c3d69ea7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 30 Nov 2022 12:36:26 +0100 Subject: [PATCH 241/421] bring back market side setting in get_state_info --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index e1381ab62..9d2fae583 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -193,6 +193,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): now = datetime.now(timezone.utc).timestamp() trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds) current_profit = trade.calc_profit_ratio(current_rate) + if trade.is_short: + market_side = 0 + else: + market_side = 1 return market_side, current_profit, int(trade_duration) From 79821ebb33bf6eea901e51ae4e24ba8e16837ac4 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Wed, 30 Nov 2022 08:41:44 -0300 Subject: [PATCH 242/421] fix flake8 errors --- freqtrade/freqai/data_kitchen.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 3201fc451..c6f22e468 100644 --- 
a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -10,7 +10,7 @@ import numpy as np import numpy.typing as npt import pandas as pd import psutil -from pandas import DataFrame, HDFStore +from pandas import DataFrame from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -455,7 +455,6 @@ class FreqaiDataKitchen: # print(tr_training_list, tr_backtesting_list) return tr_training_list_timerange, tr_backtesting_list_timerange - def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame: """ Given a full dataframe, extract the user desired window From 59c7ce02f5c91461c3ee501023dbb2a6e92cd0c2 Mon Sep 17 00:00:00 2001 From: gautier pialat Date: Wed, 30 Nov 2022 21:29:34 +0100 Subject: [PATCH 243/421] binance restricted locations and server location Inform end user before he creates server in a binance restricted location https://github.com/ccxt/ccxt/issues/15872 --- docs/exchanges.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/exchanges.md b/docs/exchanges.md index b4eb7e023..7070fc690 100644 --- a/docs/exchanges.md +++ b/docs/exchanges.md @@ -54,6 +54,9 @@ This configuration enables kraken, as well as rate-limiting to avoid bans from t ## Binance +!!! Warning "Server location and geo-ip restrictions" + Please be aware that binance restrict api access regarding the server country. The currents and non exhaustive countries blocked are United States, Malaysia (Singapour), Ontario (Canada). Please go to [binance terms > b. Eligibility](https://www.binance.com/en/terms) to find up to date list. + Binance supports [time_in_force](configuration.md#understand-order_time_in_force). !!! Tip "Stoploss on Exchange" From 95651fcd5a421e2d0d8eb0211844e4cff187159b Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 1 Dec 2022 06:27:19 +0100 Subject: [PATCH 244/421] Improve/simplify telegram exception handling Move exceptionhandling to the decorator. --- freqtrade/rpc/telegram.py | 691 ++++++++++++++++++-------------------- 1 file changed, 322 insertions(+), 369 deletions(-) diff --git a/freqtrade/rpc/telegram.py b/freqtrade/rpc/telegram.py index 708a1ce53..38fe0cd13 100644 --- a/freqtrade/rpc/telegram.py +++ b/freqtrade/rpc/telegram.py @@ -79,6 +79,8 @@ def authorized_only(command_handler: Callable[..., None]) -> Callable[..., Any]: ) try: return command_handler(self, *args, **kwargs) + except RPCException as e: + self._send_msg(str(e)) except BaseException: logger.exception('Exception occurred within Telegram module') @@ -538,72 +540,67 @@ class Telegram(RPCHandler): handler for `/status` and `/status `. """ - try: + # Check if there's at least one numerical ID provided. + # If so, try to get only these trades. + trade_ids = [] + if context.args and len(context.args) > 0: + trade_ids = [int(i) for i in context.args if i.isnumeric()] - # Check if there's at least one numerical ID provided. - # If so, try to get only these trades. 
- trade_ids = [] - if context.args and len(context.args) > 0: - trade_ids = [int(i) for i in context.args if i.isnumeric()] + results = self._rpc._rpc_trade_status(trade_ids=trade_ids) + position_adjust = self._config.get('position_adjustment_enable', False) + max_entries = self._config.get('max_entry_position_adjustment', -1) + for r in results: + r['open_date_hum'] = arrow.get(r['open_date']).humanize() + r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']]) + r['exit_reason'] = r.get('exit_reason', "") + lines = [ + "*Trade ID:* `{trade_id}`" + + (" `(since {open_date_hum})`" if r['is_open'] else ""), + "*Current Pair:* {pair}", + "*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"), + "*Leverage:* `{leverage}`" if r.get('leverage') else "", + "*Amount:* `{amount} ({stake_amount} {quote_currency})`", + "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "", + "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "", + ] - results = self._rpc._rpc_trade_status(trade_ids=trade_ids) - position_adjust = self._config.get('position_adjustment_enable', False) - max_entries = self._config.get('max_entry_position_adjustment', -1) - for r in results: - r['open_date_hum'] = arrow.get(r['open_date']).humanize() - r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']]) - r['exit_reason'] = r.get('exit_reason', "") - lines = [ - "*Trade ID:* `{trade_id}`" + - (" `(since {open_date_hum})`" if r['is_open'] else ""), - "*Current Pair:* {pair}", - "*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"), - "*Leverage:* `{leverage}`" if r.get('leverage') else "", - "*Amount:* `{amount} ({stake_amount} {quote_currency})`", - "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "", - "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "", - ] + if position_adjust: + max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "") + lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str) - if position_adjust: - max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "") - lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str) + lines.extend([ + "*Open Rate:* `{open_rate:.8f}`", + "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "", + "*Open Date:* `{open_date}`", + "*Close Date:* `{close_date}`" if r['close_date'] else "", + "*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "", + ("*Current Profit:* " if r['is_open'] else "*Close Profit: *") + + "`{profit_ratio:.2%}`", + ]) - lines.extend([ - "*Open Rate:* `{open_rate:.8f}`", - "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "", - "*Open Date:* `{open_date}`", - "*Close Date:* `{close_date}`" if r['close_date'] else "", - "*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "", - ("*Current Profit:* " if r['is_open'] else "*Close Profit: *") - + "`{profit_ratio:.2%}`", - ]) + if r['is_open']: + if r.get('realized_profit'): + lines.append("*Realized Profit:* `{realized_profit:.8f}`") + if (r['stop_loss_abs'] != r['initial_stop_loss_abs'] + and r['initial_stop_loss_ratio'] is not None): + # Adding initial stoploss only if it is different from stoploss + lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` " + "`({initial_stop_loss_ratio:.2%})`") - if r['is_open']: - if r.get('realized_profit'): - lines.append("*Realized Profit:* `{realized_profit:.8f}`") - if (r['stop_loss_abs'] != r['initial_stop_loss_abs'] - and r['initial_stop_loss_ratio'] is not None): - # Adding initial stoploss only if it is different from stoploss - 
lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` " - "`({initial_stop_loss_ratio:.2%})`") + # Adding stoploss and stoploss percentage only if it is not None + lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " + + ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else "")) + lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` " + "`({stoploss_current_dist_ratio:.2%})`") + if r['open_order']: + lines.append( + "*Open Order:* `{open_order}`" + + "- `{exit_order_status}`" if r['exit_order_status'] else "") - # Adding stoploss and stoploss percentage only if it is not None - lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " + - ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else "")) - lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` " - "`({stoploss_current_dist_ratio:.2%})`") - if r['open_order']: - lines.append( - "*Open Order:* `{open_order}`" - + "- `{exit_order_status}`" if r['exit_order_status'] else "") - - lines_detail = self._prepare_order_details( - r['orders'], r['quote_currency'], r['is_open']) - lines.extend(lines_detail if lines_detail else "") - self.__send_status_msg(lines, r) - - except RPCException as e: - self._send_msg(str(e)) + lines_detail = self._prepare_order_details( + r['orders'], r['quote_currency'], r['is_open']) + lines.extend(lines_detail if lines_detail else "") + self.__send_status_msg(lines, r) def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None: """ @@ -630,37 +627,34 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - fiat_currency = self._config.get('fiat_display_currency', '') - statlist, head, fiat_profit_sum = self._rpc._rpc_status_table( - self._config['stake_currency'], fiat_currency) + fiat_currency = self._config.get('fiat_display_currency', '') + statlist, head, fiat_profit_sum = self._rpc._rpc_status_table( + self._config['stake_currency'], fiat_currency) - show_total = not isnan(fiat_profit_sum) and len(statlist) > 1 - max_trades_per_msg = 50 - """ - Calculate the number of messages of 50 trades per message - 0.99 is used to make sure that there are no extra (empty) messages - As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message - """ - messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1) - for i in range(0, messages_count): - trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg] - if show_total and i == messages_count - 1: - # append total line - trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"]) + show_total = not isnan(fiat_profit_sum) and len(statlist) > 1 + max_trades_per_msg = 50 + """ + Calculate the number of messages of 50 trades per message + 0.99 is used to make sure that there are no extra (empty) messages + As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message + """ + messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1) + for i in range(0, messages_count): + trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg] + if show_total and i == messages_count - 1: + # append total line + trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"]) - message = tabulate(trades, - headers=head, - tablefmt='simple') - if show_total and i == messages_count - 1: - # insert separators line between Total - lines = message.split("\n") - message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]]) - self._send_msg(f"

<pre>{message}</pre>
", parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_status_table", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + message = tabulate(trades, + headers=head, + tablefmt='simple') + if show_total and i == messages_count - 1: + # insert separators line between Total + lines = message.split("\n") + message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]]) + self._send_msg(f"
<pre>{message}</pre>
", parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_status_table", + query=update.callback_query) @authorized_only def _timeunit_stats(self, update: Update, context: CallbackContext, unit: str) -> None: @@ -686,35 +680,32 @@ class Telegram(RPCHandler): timescale = int(context.args[0]) if context.args else val.default except (TypeError, ValueError, IndexError): timescale = val.default - try: - stats = self._rpc._rpc_timeunit_profit( - timescale, - stake_cur, - fiat_disp_cur, - unit - ) - stats_tab = tabulate( - [[f"{period['date']} ({period['trade_count']})", - f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}", - f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}", - f"{period['rel_profit']:.2%}", - ] for period in stats['data']], - headers=[ - f"{val.header} (count)", - f'{stake_cur}', - f'{fiat_disp_cur}', - 'Profit %', - 'Trades', - ], - tablefmt='simple') - message = ( - f'{val.message} Profit over the last {timescale} {val.message2}:\n' - f'
<pre>{stats_tab}</pre>
' - ) - self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True, - callback_path=val.callback, query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + stats = self._rpc._rpc_timeunit_profit( + timescale, + stake_cur, + fiat_disp_cur, + unit + ) + stats_tab = tabulate( + [[f"{period['date']} ({period['trade_count']})", + f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}", + f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}", + f"{period['rel_profit']:.2%}", + ] for period in stats['data']], + headers=[ + f"{val.header} (count)", + f'{stake_cur}', + f'{fiat_disp_cur}', + 'Profit %', + 'Trades', + ], + tablefmt='simple') + message = ( + f'{val.message} Profit over the last {timescale} {val.message2}:\n' + f'
<pre>{stats_tab}</pre>
' + ) + self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True, + callback_path=val.callback, query=update.callback_query) @authorized_only def _daily(self, update: Update, context: CallbackContext) -> None: @@ -878,79 +869,76 @@ class Telegram(RPCHandler): @authorized_only def _balance(self, update: Update, context: CallbackContext) -> None: """ Handler for /balance """ - try: - result = self._rpc._rpc_balance(self._config['stake_currency'], - self._config.get('fiat_display_currency', '')) + result = self._rpc._rpc_balance(self._config['stake_currency'], + self._config.get('fiat_display_currency', '')) - balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0) - if not balance_dust_level: - balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0) + balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0) + if not balance_dust_level: + balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0) - output = '' - if self._config['dry_run']: - output += "*Warning:* Simulated balances in Dry Mode.\n" - starting_cap = round_coin_value( - result['starting_capital'], self._config['stake_currency']) - output += f"Starting capital: `{starting_cap}`" - starting_cap_fiat = round_coin_value( - result['starting_capital_fiat'], self._config['fiat_display_currency'] - ) if result['starting_capital_fiat'] > 0 else '' - output += (f" `, {starting_cap_fiat}`.\n" - ) if result['starting_capital_fiat'] > 0 else '.\n' + output = '' + if self._config['dry_run']: + output += "*Warning:* Simulated balances in Dry Mode.\n" + starting_cap = round_coin_value( + result['starting_capital'], self._config['stake_currency']) + output += f"Starting capital: `{starting_cap}`" + starting_cap_fiat = round_coin_value( + result['starting_capital_fiat'], self._config['fiat_display_currency'] + ) if result['starting_capital_fiat'] > 0 else '' + output += (f" `, {starting_cap_fiat}`.\n" + ) if result['starting_capital_fiat'] > 0 else '.\n' - total_dust_balance = 0 - total_dust_currencies = 0 - for curr in result['currencies']: - curr_output = '' - if curr['est_stake'] > balance_dust_level: - if curr['is_position']: - curr_output = ( - f"*{curr['currency']}:*\n" - f"\t`{curr['side']}: {curr['position']:.8f}`\n" - f"\t`Leverage: {curr['leverage']:.1f}`\n" - f"\t`Est. {curr['stake']}: " - f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") - else: - curr_output = ( - f"*{curr['currency']}:*\n" - f"\t`Available: {curr['free']:.8f}`\n" - f"\t`Balance: {curr['balance']:.8f}`\n" - f"\t`Pending: {curr['used']:.8f}`\n" - f"\t`Est. {curr['stake']}: " - f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") - elif curr['est_stake'] <= balance_dust_level: - total_dust_balance += curr['est_stake'] - total_dust_currencies += 1 - - # Handle overflowing message length - if len(output + curr_output) >= MAX_MESSAGE_LENGTH: - self._send_msg(output) - output = curr_output + total_dust_balance = 0 + total_dust_currencies = 0 + for curr in result['currencies']: + curr_output = '' + if curr['est_stake'] > balance_dust_level: + if curr['is_position']: + curr_output = ( + f"*{curr['currency']}:*\n" + f"\t`{curr['side']}: {curr['position']:.8f}`\n" + f"\t`Leverage: {curr['leverage']:.1f}`\n" + f"\t`Est. 
{curr['stake']}: " + f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") else: - output += curr_output + curr_output = ( + f"*{curr['currency']}:*\n" + f"\t`Available: {curr['free']:.8f}`\n" + f"\t`Balance: {curr['balance']:.8f}`\n" + f"\t`Pending: {curr['used']:.8f}`\n" + f"\t`Est. {curr['stake']}: " + f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") + elif curr['est_stake'] <= balance_dust_level: + total_dust_balance += curr['est_stake'] + total_dust_currencies += 1 - if total_dust_balance > 0: - output += ( - f"*{total_dust_currencies} Other " - f"{plural(total_dust_currencies, 'Currency', 'Currencies')} " - f"(< {balance_dust_level} {result['stake']}):*\n" - f"\t`Est. {result['stake']}: " - f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n") - tc = result['trade_count'] > 0 - stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else '' - fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else '' + # Handle overflowing message length + if len(output + curr_output) >= MAX_MESSAGE_LENGTH: + self._send_msg(output) + output = curr_output + else: + output += curr_output - output += ("\n*Estimated Value*:\n" - f"\t`{result['stake']}: " - f"{round_coin_value(result['total'], result['stake'], False)}`" - f"{stake_improve}\n" - f"\t`{result['symbol']}: " - f"{round_coin_value(result['value'], result['symbol'], False)}`" - f"{fiat_val}\n") - self._send_msg(output, reload_able=True, callback_path="update_balance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + if total_dust_balance > 0: + output += ( + f"*{total_dust_currencies} Other " + f"{plural(total_dust_currencies, 'Currency', 'Currencies')} " + f"(< {balance_dust_level} {result['stake']}):*\n" + f"\t`Est. {result['stake']}: " + f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n") + tc = result['trade_count'] > 0 + stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else '' + fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else '' + + output += ("\n*Estimated Value*:\n" + f"\t`{result['stake']}: " + f"{round_coin_value(result['total'], result['stake'], False)}`" + f"{stake_improve}\n" + f"\t`{result['symbol']}: " + f"{round_coin_value(result['value'], result['symbol'], False)}`" + f"{fiat_val}\n") + self._send_msg(output, reload_able=True, callback_path="update_balance", + query=update.callback_query) @authorized_only def _start(self, update: Update, context: CallbackContext) -> None: @@ -1125,26 +1113,23 @@ class Telegram(RPCHandler): nrecent = int(context.args[0]) if context.args else 10 except (TypeError, ValueError, IndexError): nrecent = 10 - try: - trades = self._rpc._rpc_trade_history( - nrecent - ) - trades_tab = tabulate( - [[arrow.get(trade['close_date']).humanize(), - trade['pair'] + " (#" + str(trade['trade_id']) + ")", - f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"] - for trade in trades['trades']], - headers=[ - 'Close Date', - 'Pair (ID)', - f'Profit ({stake_cur})', - ], - tablefmt='simple') - message = (f"{min(trades['trades_count'], nrecent)} recent trades:\n" - + (f"
<pre>{trades_tab}</pre>
" if trades['trades_count'] > 0 else '')) - self._send_msg(message, parse_mode=ParseMode.HTML) - except RPCException as e: - self._send_msg(str(e)) + trades = self._rpc._rpc_trade_history( + nrecent + ) + trades_tab = tabulate( + [[arrow.get(trade['close_date']).humanize(), + trade['pair'] + " (#" + str(trade['trade_id']) + ")", + f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"] + for trade in trades['trades']], + headers=[ + 'Close Date', + 'Pair (ID)', + f'Profit ({stake_cur})', + ], + tablefmt='simple') + message = (f"{min(trades['trades_count'], nrecent)} recent trades:\n" + + (f"
<pre>{trades_tab}</pre>
" if trades['trades_count'] > 0 else '')) + self._send_msg(message, parse_mode=ParseMode.HTML) @authorized_only def _delete_trade(self, update: Update, context: CallbackContext) -> None: @@ -1155,18 +1140,14 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - if not context.args or len(context.args) == 0: - raise RPCException("Trade-id not set.") - trade_id = int(context.args[0]) - msg = self._rpc._rpc_delete(trade_id) - self._send_msg(( - f"`{msg['result_msg']}`\n" - 'Please make sure to take care of this asset on the exchange manually.' - )) - - except RPCException as e: - self._send_msg(str(e)) + if not context.args or len(context.args) == 0: + raise RPCException("Trade-id not set.") + trade_id = int(context.args[0]) + msg = self._rpc._rpc_delete(trade_id) + self._send_msg(( + f"`{msg['result_msg']}`\n" + 'Please make sure to take care of this asset on the exchange manually.' + )) @authorized_only def _performance(self, update: Update, context: CallbackContext) -> None: @@ -1177,27 +1158,24 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - trades = self._rpc._rpc_performance() - output = "Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['pair']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_performance() + output = "Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['pair']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit_ratio']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output += stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_performance", + query=update.callback_query) @authorized_only def _enter_tag_performance(self, update: Update, context: CallbackContext) -> None: @@ -1208,31 +1186,28 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - pair = None - if context.args and isinstance(context.args[0], str): - pair = context.args[0] + pair = None + if context.args and isinstance(context.args[0], str): + pair = context.args[0] - trades = self._rpc._rpc_enter_tag_performance(pair) - output = "Entry Tag Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['enter_tag']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_enter_tag_performance(pair) + output = "Entry Tag Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['enter_tag']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit_ratio']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += 
stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output += stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_enter_tag_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_enter_tag_performance", + query=update.callback_query) @authorized_only def _exit_reason_performance(self, update: Update, context: CallbackContext) -> None: @@ -1243,31 +1218,28 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - pair = None - if context.args and isinstance(context.args[0], str): - pair = context.args[0] + pair = None + if context.args and isinstance(context.args[0], str): + pair = context.args[0] - trades = self._rpc._rpc_exit_reason_performance(pair) - output = "Exit Reason Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['exit_reason']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_exit_reason_performance(pair) + output = "Exit Reason Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['exit_reason']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit_ratio']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output += stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_exit_reason_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_exit_reason_performance", + query=update.callback_query) @authorized_only def _mix_tag_performance(self, update: Update, context: CallbackContext) -> None: @@ -1278,31 +1250,28 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - pair = None - if context.args and isinstance(context.args[0], str): - pair = context.args[0] + pair = None + if context.args and isinstance(context.args[0], str): + pair = context.args[0] - trades = self._rpc._rpc_mix_tag_performance(pair) - output = "Mix Tag Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['mix_tag']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_mix_tag_performance(pair) + output = "Mix Tag Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['mix_tag']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output 
+= stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_mix_tag_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_mix_tag_performance", + query=update.callback_query) @authorized_only def _count(self, update: Update, context: CallbackContext) -> None: @@ -1313,18 +1282,15 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - counts = self._rpc._rpc_count() - message = tabulate({k: [v] for k, v in counts.items()}, - headers=['current', 'max', 'total stake'], - tablefmt='simple') - message = "
<pre>{}</pre>
".format(message) - logger.debug(message) - self._send_msg(message, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_count", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + counts = self._rpc._rpc_count() + message = tabulate({k: [v] for k, v in counts.items()}, + headers=['current', 'max', 'total stake'], + tablefmt='simple') + message = "
<pre>{}</pre>
".format(message) + logger.debug(message) + self._send_msg(message, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_count", + query=update.callback_query) @authorized_only def _locks(self, update: Update, context: CallbackContext) -> None: @@ -1372,22 +1338,19 @@ class Telegram(RPCHandler): Handler for /whitelist Shows the currently active whitelist """ - try: - whitelist = self._rpc._rpc_whitelist() + whitelist = self._rpc._rpc_whitelist() - if context.args: - if "sorted" in context.args: - whitelist['whitelist'] = sorted(whitelist['whitelist']) - if "baseonly" in context.args: - whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']] + if context.args: + if "sorted" in context.args: + whitelist['whitelist'] = sorted(whitelist['whitelist']) + if "baseonly" in context.args: + whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']] - message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n" - message += f"`{', '.join(whitelist['whitelist'])}`" + message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n" + message += f"`{', '.join(whitelist['whitelist'])}`" - logger.debug(message) - self._send_msg(message) - except RPCException as e: - self._send_msg(str(e)) + logger.debug(message) + self._send_msg(message) @authorized_only def _blacklist(self, update: Update, context: CallbackContext) -> None: @@ -1425,30 +1388,27 @@ class Telegram(RPCHandler): Shows the latest logs """ try: - try: - limit = int(context.args[0]) if context.args else 10 - except (TypeError, ValueError, IndexError): - limit = 10 - logs = RPC._rpc_get_logs(limit)['logs'] - msgs = '' - msg_template = "*{}* {}: {} \\- `{}`" - for logrec in logs: - msg = msg_template.format(escape_markdown(logrec[0], version=2), - escape_markdown(logrec[2], version=2), - escape_markdown(logrec[3], version=2), - escape_markdown(logrec[4], version=2)) - if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH: - # Send message immediately if it would become too long - self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) - msgs = msg + '\n' - else: - # Append message to messages to send - msgs += msg + '\n' - - if msgs: + limit = int(context.args[0]) if context.args else 10 + except (TypeError, ValueError, IndexError): + limit = 10 + logs = RPC._rpc_get_logs(limit)['logs'] + msgs = '' + msg_template = "*{}* {}: {} \\- `{}`" + for logrec in logs: + msg = msg_template.format(escape_markdown(logrec[0], version=2), + escape_markdown(logrec[2], version=2), + escape_markdown(logrec[3], version=2), + escape_markdown(logrec[4], version=2)) + if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH: + # Send message immediately if it would become too long self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) - except RPCException as e: - self._send_msg(str(e)) + msgs = msg + '\n' + else: + # Append message to messages to send + msgs += msg + '\n' + + if msgs: + self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) @authorized_only def _edge(self, update: Update, context: CallbackContext) -> None: @@ -1456,21 +1416,17 @@ class Telegram(RPCHandler): Handler for /edge Shows information related to Edge """ - try: - edge_pairs = self._rpc._rpc_edge() - if not edge_pairs: - message = 'Edge only validated following pairs:' - self._send_msg(message, parse_mode=ParseMode.HTML) + edge_pairs = self._rpc._rpc_edge() + if not edge_pairs: + message = 'Edge only validated following pairs:' + self._send_msg(message, parse_mode=ParseMode.HTML) - for chunk in 
chunks(edge_pairs, 25): - edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple') - message = (f'Edge only validated following pairs:\n' - f'
<pre>{edge_pairs_tab}</pre>
') + for chunk in chunks(edge_pairs, 25): + edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple') + message = (f'Edge only validated following pairs:\n' + f'
<pre>{edge_pairs_tab}</pre>
') - self._send_msg(message, parse_mode=ParseMode.HTML) - - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(message, parse_mode=ParseMode.HTML) @authorized_only def _help(self, update: Update, context: CallbackContext) -> None: @@ -1551,12 +1507,9 @@ class Telegram(RPCHandler): Handler for /health Shows the last process timestamp """ - try: - health = self._rpc._health() - message = f"Last process: `{health['last_process_loc']}`" - self._send_msg(message) - except RPCException as e: - self._send_msg(str(e)) + health = self._rpc._health() + message = f"Last process: `{health['last_process_loc']}`" + self._send_msg(message) @authorized_only def _version(self, update: Update, context: CallbackContext) -> None: From 4a9982f86bdc340441cd9c7fff1259f9813a715d Mon Sep 17 00:00:00 2001 From: Emre Date: Thu, 1 Dec 2022 10:08:42 +0300 Subject: [PATCH 245/421] Fix sb3_contrib loading issue --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- freqtrade/freqai/data_drawer.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 9d2fae583..81f8edfc4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -64,7 +64,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() self.net_arch = self.rl_config.get('net_arch', [128, 128]) - self.dd.model_type = "stable_baselines" + self.dd.model_type = import_str def unset_outlier_removal(self): """ diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 99e3686b3..5e1f3a344 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -503,7 +503,7 @@ class FreqaiDataDrawer: dump(model, save_path / f"{dk.model_filename}_model.joblib") elif self.model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif 'stable_baselines' in self.model_type: + elif self.model_type in ['stable_baselines3', 'sb3_contrib']: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -589,9 +589,9 @@ class FreqaiDataDrawer: elif self.model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif self.model_type == 'stable_baselines': + elif self.model_type in ['stable_baselines3', 'sb3_contrib']: mod = importlib.import_module( - 'stable_baselines3', self.freqai_info['rl_config']['model_type']) + self.model_type, self.freqai_info['rl_config']['model_type']) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") From 396e666e9b46c4447907c9c093bef67931b09087 Mon Sep 17 00:00:00 2001 From: Emre Date: Thu, 1 Dec 2022 11:03:51 +0300 Subject: [PATCH 246/421] Keep old behavior of model loading --- freqtrade/freqai/data_drawer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 5e1f3a344..848fb20eb 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -503,7 +503,7 @@ class FreqaiDataDrawer: dump(model, save_path / f"{dk.model_filename}_model.joblib") elif self.model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif self.model_type in 
['stable_baselines3', 'sb3_contrib']: + elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -589,7 +589,7 @@ class FreqaiDataDrawer: elif self.model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif self.model_type in ['stable_baselines3', 'sb3_contrib']: + elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type: mod = importlib.import_module( self.model_type, self.freqai_info['rl_config']['model_type']) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) From eb81cccedebb79bd363b4d8fb48b49b6700e9749 Mon Sep 17 00:00:00 2001 From: k <> Date: Thu, 1 Dec 2022 16:37:24 +0800 Subject: [PATCH 247/421] add download-data command change directory fix relative config path --- .../templates/strategy_analysis_example.ipynb | 28 ++++++++++++++----- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/freqtrade/templates/strategy_analysis_example.ipynb b/freqtrade/templates/strategy_analysis_example.ipynb index 5fb14ab2f..f7d68b41c 100644 --- a/freqtrade/templates/strategy_analysis_example.ipynb +++ b/freqtrade/templates/strategy_analysis_example.ipynb @@ -7,7 +7,7 @@ "# Strategy analysis example\n", "\n", "Debugging a strategy can be time-consuming. Freqtrade offers helper functions to visualize raw data.\n", - "The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location." + "The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location, using command like `freqtrade download-data --exchange binance --trading-mod spot --pairs BTC/USDT --days 7 -t 5m`." 
] }, { @@ -23,7 +23,21 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", "from pathlib import Path\n", + "\n", + "# Change current working directory from `somedir/freqtrade/user_data/notebooks` to project root `somedir/freqtrade`, so relative paths remain consistent.\n", + "if not Path(\"LICENSE\").is_file():\n", + " os.chdir(\"../../\")\n", + "print(Path.cwd())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "from freqtrade.configuration import Configuration\n", "\n", "# Customize these according to your needs.\n", @@ -31,14 +45,14 @@ "# Initialize empty configuration object\n", "config = Configuration.from_files([])\n", "# Optionally (recommended), use existing configuration file\n", - "# config = Configuration.from_files([\"config.json\"])\n", + "# config = Configuration.from_files([\"user_data/config.json\"])\n", "\n", "# Define some constants\n", "config[\"timeframe\"] = \"5m\"\n", "# Name of the strategy class\n", "config[\"strategy\"] = \"SampleStrategy\"\n", "# Location of the data\n", - "data_location = config['datadir']\n", + "data_location = config[\"datadir\"]\n", "# Pair to analyze - Only use one pair here\n", "pair = \"BTC/USDT\"" ] @@ -56,7 +70,7 @@ "candles = load_pair_history(datadir=data_location,\n", " timeframe=config[\"timeframe\"],\n", " pair=pair,\n", - " data_format = \"hdf5\",\n", + " data_format = \"json\",\n", " candle_type=CandleType.SPOT,\n", " )\n", "\n", @@ -365,7 +379,7 @@ "metadata": { "file_extension": ".py", "kernelspec": { - "display_name": "Python 3.9.7 64-bit ('trade_397')", + "display_name": "Python 3.11.0 64-bit", "language": "python", "name": "python3" }, @@ -379,7 +393,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.11.0" }, "mimetype": "text/x-python", "name": "python", @@ -430,7 +444,7 @@ "version": 3, "vscode": { "interpreter": { - "hash": "675f32a300d6d26767470181ad0b11dd4676bcce7ed1dd2ffe2fbc370c95fc7c" + "hash": "945ba00099661281427cc644a7000ee9eeea5ce6ad3bf937939d3d384b8f3881" } } }, From 2b3e166dc2590f994aebce2329a99f041b1aec0e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 1 Dec 2022 10:10:28 +0100 Subject: [PATCH 248/421] fix fees RL --- freqtrade/freqai/RL/BaseEnvironment.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 66bdb8435..e7bd26a92 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -194,12 +194,12 @@ class BaseEnvironment(gym.Env): if self._position == Positions.Neutral: return 0. elif self._position == Positions.Short: - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) return (current_price - last_trade_price) / last_trade_price else: return 0. 
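The fee fix above swaps which fee helper is applied to the entry and exit legs for short positions, so that fees always reduce the reported unrealized profit regardless of trade direction. The standalone sketch below re-derives that arithmetic; the `FEE` value and the `add_entry_fee`/`add_exit_fee` formulas are assumptions made purely for illustration, since the real helpers live in `BaseEnvironment` and are not part of this hunk.

```python
# Standalone re-derivation of the corrected unrealized-profit arithmetic.
# NOTE: FEE, add_entry_fee and add_exit_fee are illustrative assumptions;
# the real helpers live in BaseEnvironment and are not shown in this hunk.
FEE = 0.0015


def add_entry_fee(price: float) -> float:
    return price * (1 + FEE)   # paying the fee makes the effective entry price worse


def add_exit_fee(price: float) -> float:
    return price / (1 + FEE)   # the fee reduces what is effectively received on exit


def unrealized_profit(position: str, entry_open: float, current_open: float) -> float:
    if position == "long":
        entry = add_entry_fee(entry_open)     # bought here, fee paid
        exit_ = add_exit_fee(current_open)    # would sell here, fee paid
        return (exit_ - entry) / entry
    if position == "short":
        entry = add_exit_fee(entry_open)      # sold here to open, fee paid
        exit_ = add_entry_fee(current_open)   # would buy back here, fee paid
        return (entry - exit_) / entry
    return 0.0


# Fees now penalize both directions symmetrically:
print(f"{unrealized_profit('long', 100.0, 101.0):.4%}")   # a bit under the raw +1% move
print(f"{unrealized_profit('short', 100.0, 99.0):.4%}")   # a bit under the raw +1% move
```

Under those assumed formulas, the previous ordering applied the fees in the favourable direction for shorts and so overstated their unrealized profit, which is what the hunk above corrects.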
From 77dc2c92a7147239fc4853b361f54010ecc7b38e Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Thu, 1 Dec 2022 12:53:19 -0300 Subject: [PATCH 249/421] performance improvevemnts - backtest freqai from saved predictions --- freqtrade/freqai/data_kitchen.py | 6 +++--- freqtrade/freqai/freqai_interface.py | 18 ++++++++++-------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index c6f22e468..9c8158c8a 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -462,10 +462,10 @@ class FreqaiDataKitchen: :param df: Dataframe containing all candles to run the entire backtest. Here it is sliced down to just the present training period. """ - - df = df.loc[df["date"] >= timerange.startdt, :] if not self.live: - df = df.loc[df["date"] < timerange.stopdt, :] + df = df.loc[(df["date"] >= timerange.startdt) & (df["date"] < timerange.stopdt), :] + else: + df = df.loc[df["date"] >= timerange.startdt, :] return df diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 3386d2881..34780f930 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -282,10 +282,10 @@ class IFreqaiModel(ABC): train_it += 1 total_trains = len(dk.backtesting_timeranges) self.training_timerange = tr_train - dataframe_train = dk.slice_dataframe(tr_train, dataframe) - dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe) + len_backtest_df = len(dataframe.loc[(dataframe["date"] >= tr_backtest.startdt) & ( + dataframe["date"] < tr_backtest.stopdt), :]) - if not self.ensure_data_exists(dataframe_backtest, tr_backtest, pair): + if not self.ensure_data_exists(len_backtest_df, tr_backtest, pair): continue self.log_backtesting_progress(tr_train, pair, train_it, total_trains) @@ -298,13 +298,15 @@ class IFreqaiModel(ABC): dk.set_new_model_names(pair, timestamp_model_id) - if dk.check_if_backtest_prediction_is_valid(len(dataframe_backtest)): + if dk.check_if_backtest_prediction_is_valid(len_backtest_df): self.dd.load_metadata(dk) - dk.find_features(dataframe_train) + dk.find_features(dataframe) self.check_if_feature_list_matches_strategy(dk) append_df = dk.get_backtesting_prediction() dk.append_predictions(append_df) else: + dataframe_train = dk.slice_dataframe(tr_train, dataframe) + dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe) if not self.model_exists(dk): dk.find_features(dataframe_train) dk.find_labels(dataframe_train) @@ -804,16 +806,16 @@ class IFreqaiModel(ABC): self.pair_it = 1 self.current_candle = self.dd.current_candle - def ensure_data_exists(self, dataframe_backtest: DataFrame, + def ensure_data_exists(self, len_dataframe_backtest: int, tr_backtest: TimeRange, pair: str) -> bool: """ Check if the dataframe is empty, if not, report useful information to user. - :param dataframe_backtest: the backtesting dataframe, maybe empty. + :param len_dataframe_backtest: the len of backtesting dataframe :param tr_backtest: current backtesting timerange. :param pair: current pair :return: if the data exists or not """ - if self.config.get("freqai_backtest_live_models", False) and len(dataframe_backtest) == 0: + if self.config.get("freqai_backtest_live_models", False) and len_dataframe_backtest == 0: logger.info(f"No data found for pair {pair} from " f"from { tr_backtest.start_fmt} to {tr_backtest.stop_fmt}. 
" "Probably more than one training within the same candle period.") From 05424045b0f5ec1bc6221bd5114a65d4922cebef Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 2 Dec 2022 06:12:21 +0100 Subject: [PATCH 250/421] Temporarily disable since binance blocks US --- tests/exchange/test_ccxt_compat.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/exchange/test_ccxt_compat.py b/tests/exchange/test_ccxt_compat.py index 55d463c68..280876ae8 100644 --- a/tests/exchange/test_ccxt_compat.py +++ b/tests/exchange/test_ccxt_compat.py @@ -28,15 +28,15 @@ EXCHANGES = { 'leverage_tiers_public': False, 'leverage_in_spot_market': False, }, - 'binance': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '5m', - 'futures': True, - 'leverage_tiers_public': False, - 'leverage_in_spot_market': False, - }, + # 'binance': { + # 'pair': 'BTC/USDT', + # 'stake_currency': 'USDT', + # 'hasQuoteVolume': True, + # 'timeframe': '5m', + # 'futures': True, + # 'leverage_tiers_public': False, + # 'leverage_in_spot_market': False, + # }, 'kraken': { 'pair': 'BTC/USDT', 'stake_currency': 'USDT', From 7ddf7ec0aecb8366b28c9131fd21dd5c800b2f2e Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 2 Dec 2022 11:28:00 +0100 Subject: [PATCH 251/421] Update freqai-parameter-table.md --- docs/freqai-parameter-table.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index f2a52a9b8..30ae9c62e 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -37,7 +37,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN.
**Datatype:** Positive integer. | `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset.
**Datatype:** List of positive integers. | `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis)
**Datatype:** Boolean.
Default: `False`. -| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features.
**Datatype:** Integer.
Default: `0`. +| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/identifier/sub-train-COIN_`.
**Datatype:** Integer.
Default: `0`. | `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di).
**Datatype:** Positive float (typically < 1). | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Boolean. | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Dictionary. From 075c8c23c8bf50294e4a49b60466291dd63c2522 Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 21:16:04 +1100 Subject: [PATCH 252/421] add state/action info to callbacks --- .../prediction_models/ReinforcementLearner.py | 44 +++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 61b01e21b..ff39a66e0 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -71,7 +71,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): model.learn( total_timesteps=int(total_timesteps), - callback=self.eval_callback + callback=[self.eval_callback, self.tensorboard_callback] ) if Path(dk.data_path / "best_model.zip").is_file(): @@ -88,6 +88,33 @@ class ReinforcementLearner(BaseReinforcementLearningModel): User can override any function in BaseRLEnv and gym.Env. Here the user sets a custom reward based on profit and trade duration. """ + def reset(self): + + # Reset custom info + self.custom_info = {} + self.custom_info["Invalid"] = 0 + self.custom_info["Hold"] = 0 + self.custom_info["Unknown"] = 0 + self.custom_info["pnl_factor"] = 0 + self.custom_info["duration_factor"] = 0 + self.custom_info["reward_exit"] = 0 + self.custom_info["reward_hold"] = 0 + for action in Actions: + self.custom_info[f"{action.name}"] = 0 + return super().reset() + + def step(self, action: int): + observation, step_reward, done, info = super().step(action) + info = dict( + tick=self._current_tick, + action=action, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value, + trade_duration=self.get_trade_duration(), + current_profit_pct=self.get_unrealized_profit() + ) + return observation, step_reward, done, info def calculate_reward(self, action: int) -> float: """ @@ -100,17 +127,24 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ # first, penalize if the action is not valid if not self._is_valid(action): + self.custom_info["Invalid"] += 1 return -2 pnl = self.get_unrealized_profit() factor = 100. 
# reward agent for entering trades - if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + if (action ==Actions.Long_enter.value and self._position == Positions.Neutral): + self.custom_info[f"{Actions.Long_enter.name}"] += 1 + return 25 + if (action == Actions.Short_enter.value + and self._position == Positions.Neutral): + self.custom_info[f"{Actions.Short_enter.name}"] += 1 return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: + self.custom_info[f"{Actions.Neutral.name}"] += 1 return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) @@ -124,18 +158,22 @@ class ReinforcementLearner(BaseReinforcementLearningModel): # discourage sitting in position if (self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value): + self.custom_info["Hold"] += 1 return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + self.custom_info[f"{Actions.Long_exit.name}"] += 1 return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + self.custom_info[f"{Actions.Short_exit.name}"] += 1 return float(pnl * factor) - + + self.custom_info["Unknown"] += 1 return 0. From 469aa0d43fcc7e2176690ab834a3f2be98709e32 Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 21:16:46 +1100 Subject: [PATCH 253/421] add state/action info to callbacks --- .../RL/BaseReinforcementLearningModel.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 81f8edfc4..15acde6fb 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -13,9 +13,11 @@ import torch as th import torch.multiprocessing from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.logger import HParam from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -155,6 +157,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) + + self.tensorboard_callback = TensorboardCallback() @abstractmethod def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): @@ -398,3 +402,48 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, return env set_random_seed(seed) return _init + +class TensorboardCallback(BaseCallback): + """ + Custom callback for plotting additional values in tensorboard. 
+ """ + def __init__(self, verbose=1): + super(TensorboardCallback, self).__init__(verbose) + + def _on_training_start(self) -> None: + hparam_dict = { + "algorithm": self.model.__class__.__name__, + "learning_rate": self.model.learning_rate, + "gamma": self.model.gamma, + "gae_lambda": self.model.gae_lambda, + "batch_size": self.model.batch_size, + "n_steps": self.model.n_steps, + } + metric_dict = { + "eval/mean_reward": 0, + "rollout/ep_rew_mean": 0, + "rollout/ep_len_mean":0 , + "train/value_loss": 0, + "train/explained_variance": 0, + } + self.logger.record( + "hparams", + HParam(hparam_dict, metric_dict), + exclude=("stdout", "log", "json", "csv"), + ) + + def _on_step(self) -> bool: + custom_info = self.training_env.get_attr("custom_info")[0] + self.logger.record(f"_state/position", self.locals["infos"][0]["position"]) + self.logger.record(f"_state/trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record(f"_state/current_profit_pct", self.locals["infos"][0]["current_profit_pct"]) + self.logger.record(f"_reward/total_profit", self.locals["infos"][0]["total_profit"]) + self.logger.record(f"_reward/total_reward", self.locals["infos"][0]["total_reward"]) + self.logger.record_mean(f"_reward/mean_trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record(f"_actions/action", self.locals["infos"][0]["action"]) + self.logger.record(f"_actions/_Invalid", custom_info["Invalid"]) + self.logger.record(f"_actions/_Unknown", custom_info["Unknown"]) + self.logger.record(f"_actions/Hold", custom_info["Hold"]) + for action in Actions: + self.logger.record(f"_actions/{action.name}", custom_info[action.name]) + return True \ No newline at end of file From d6f45a12ae0778c6de86bd8020a69299ee474d31 Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 22:30:04 +1100 Subject: [PATCH 254/421] add multiproc fix flake8 --- freqtrade/freqai/prediction_models/ReinforcementLearner.py | 6 +++--- .../prediction_models/ReinforcementLearner_multiproc.py | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index ff39a66e0..fa1087497 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -102,7 +102,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): for action in Actions: self.custom_info[f"{action.name}"] = 0 return super().reset() - + def step(self, action: int): observation, step_reward, done, info = super().step(action) info = dict( @@ -134,7 +134,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor = 100. # reward agent for entering trades - if (action ==Actions.Long_enter.value + if (action == Actions.Long_enter.value and self._position == Positions.Neutral): self.custom_info[f"{Actions.Long_enter.name}"] += 1 return 25 @@ -174,6 +174,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) self.custom_info[f"{Actions.Short_exit.name}"] += 1 return float(pnl * factor) - + self.custom_info["Unknown"] += 1 return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 56636c1f6..dd5430aa7 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -8,7 +8,7 @@ from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner -from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env +from freqtrade.freqai.RL.BaseReinforcementLearningModel import TensorboardCallback, make_env logger = logging.getLogger(__name__) @@ -49,3 +49,5 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) + + self.tensorboard_callback = TensorboardCallback() From b2edc58089a98994861409a106a8804b9f92270c Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 22:31:02 +1100 Subject: [PATCH 255/421] fix flake8 --- .../RL/BaseReinforcementLearningModel.py | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 15acde6fb..b9b6cdd96 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -12,12 +12,11 @@ import pandas as pd import torch as th import torch.multiprocessing from pandas import DataFrame -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.callbacks import BaseCallback, EvalCallback +from stable_baselines3.common.logger import HParam from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.logger import HParam from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -157,7 +156,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - + self.tensorboard_callback = TensorboardCallback() @abstractmethod @@ -403,6 +402,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, set_random_seed(seed) return _init + class TensorboardCallback(BaseCallback): """ Custom callback for plotting additional values in tensorboard. 
@@ -422,7 +422,7 @@ class TensorboardCallback(BaseCallback): metric_dict = { "eval/mean_reward": 0, "rollout/ep_rew_mean": 0, - "rollout/ep_len_mean":0 , + "rollout/ep_len_mean": 0, "train/value_loss": 0, "train/explained_variance": 0, } @@ -431,19 +431,21 @@ class TensorboardCallback(BaseCallback): HParam(hparam_dict, metric_dict), exclude=("stdout", "log", "json", "csv"), ) - + def _on_step(self) -> bool: custom_info = self.training_env.get_attr("custom_info")[0] - self.logger.record(f"_state/position", self.locals["infos"][0]["position"]) - self.logger.record(f"_state/trade_duration", self.locals["infos"][0]["trade_duration"]) - self.logger.record(f"_state/current_profit_pct", self.locals["infos"][0]["current_profit_pct"]) - self.logger.record(f"_reward/total_profit", self.locals["infos"][0]["total_profit"]) - self.logger.record(f"_reward/total_reward", self.locals["infos"][0]["total_reward"]) - self.logger.record_mean(f"_reward/mean_trade_duration", self.locals["infos"][0]["trade_duration"]) - self.logger.record(f"_actions/action", self.locals["infos"][0]["action"]) - self.logger.record(f"_actions/_Invalid", custom_info["Invalid"]) - self.logger.record(f"_actions/_Unknown", custom_info["Unknown"]) - self.logger.record(f"_actions/Hold", custom_info["Hold"]) + self.logger.record("_state/position", self.locals["infos"][0]["position"]) + self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record("_state/current_profit_pct", self.locals["infos"] + [0]["current_profit_pct"]) + self.logger.record("_reward/total_profit", self.locals["infos"][0]["total_profit"]) + self.logger.record("_reward/total_reward", self.locals["infos"][0]["total_reward"]) + self.logger.record_mean("_reward/mean_trade_duration", self.locals["infos"] + [0]["trade_duration"]) + self.logger.record("_actions/action", self.locals["infos"][0]["action"]) + self.logger.record("_actions/_Invalid", custom_info["Invalid"]) + self.logger.record("_actions/_Unknown", custom_info["Unknown"]) + self.logger.record("_actions/Hold", custom_info["Hold"]) for action in Actions: self.logger.record(f"_actions/{action.name}", custom_info[action.name]) - return True \ No newline at end of file + return True From 38d3b4cab2e201f995682f2f5b9eee1049d23eba Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Dec 2022 11:29:21 +0100 Subject: [PATCH 256/421] add details to doc plot_feature_importance doc --- docs/freqai-parameter-table.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 30ae9c62e..d05ce80f3 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -37,7 +37,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN.
**Datatype:** Positive integer. | `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset.
**Datatype:** List of positive integers. | `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis)
**Datatype:** Boolean.
Default: `False`. -| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/identifier/sub-train-COIN_`.
**Datatype:** Integer.
Default: `0`. +| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models//sub-train-_.html`.
**Datatype:** Integer.
Default: `0`. | `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di).
**Datatype:** Positive float (typically < 1). | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Boolean. | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Dictionary. From f7b4fc5bbc0a6b652d83e780b6b950b9cbc8f70a Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sun, 4 Dec 2022 22:22:23 +1100 Subject: [PATCH 257/421] Update freqai-reinforcement-learning.md Change typo of default Tensorboard port to reflect correct port (6006) --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 353d7a2cc..b1a212a92 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -243,7 +243,7 @@ cd freqtrade tensorboard --logdir user_data/models/unique-id ``` -where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). +where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard). ![tensorboard](assets/tensorboard.jpg) From 24766928baddfed919be1138a64d51cdbb0d3764 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Dec 2022 13:54:30 +0100 Subject: [PATCH 258/421] reorganize/generalize tensorboard callback --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 8 ++- freqtrade/freqai/RL/Base5ActionRLEnv.py | 8 ++- freqtrade/freqai/RL/BaseEnvironment.py | 37 ++++++++++- .../RL/BaseReinforcementLearningModel.py | 63 +++---------------- freqtrade/freqai/RL/TensorboardCallback.py | 61 ++++++++++++++++++ .../prediction_models/ReinforcementLearner.py | 27 -------- .../ReinforcementLearner_multiproc.py | 9 +-- 7 files changed, 125 insertions(+), 88 deletions(-) create mode 100644 freqtrade/freqai/RL/TensorboardCallback.py diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index df4e79bea..7818ac51e 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -20,6 +20,9 @@ class Base4ActionRLEnv(BaseEnvironment): """ Base class for a 4 action environment """ + def __init__(self, *args): + super().__init__(*args) + self.actions = Actions def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) @@ -92,9 +95,12 @@ class Base4ActionRLEnv(BaseEnvironment): info = dict( tick=self._current_tick, + action=action, total_reward=self.total_reward, total_profit=self._total_profit, - position=self._position.value + position=self._position.value, + trade_duration=self.get_trade_duration(), + current_profit_pct=self.get_unrealized_profit() ) observation = self._get_observation() diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 68b2e011b..1c09f9386 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -21,6 +21,9 @@ class Base5ActionRLEnv(BaseEnvironment): """ Base class for a 5 action environment """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.actions = Actions def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) @@ -98,9 +101,12 @@ class Base5ActionRLEnv(BaseEnvironment): info = dict( tick=self._current_tick, + action=action, total_reward=self.total_reward, total_profit=self._total_profit, - position=self._position.value + position=self._position.value, + trade_duration=self.get_trade_duration(), 
+ current_profit_pct=self.get_unrealized_profit() ) observation = self._get_observation() diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index e7bd26a92..3fca6a25d 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -2,7 +2,7 @@ import logging import random from abc import abstractmethod from enum import Enum -from typing import Optional +from typing import Optional, Type import gym import numpy as np @@ -17,6 +17,17 @@ from freqtrade.data.dataprovider import DataProvider logger = logging.getLogger(__name__) +class BaseActions(Enum): + """ + Default action space, mostly used for type handling. + """ + Neutral = 0 + Long_enter = 1 + Long_exit = 2 + Short_enter = 3 + Short_exit = 4 + + class Positions(Enum): Short = 0 Long = 1 @@ -64,6 +75,9 @@ class BaseEnvironment(gym.Env): else: self.fee = 0.0015 + # set here to default 5Ac, but all children envs can overwrite this + self.actions: Type[Enum] = BaseActions + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): """ @@ -106,6 +120,7 @@ class BaseEnvironment(gym.Env): self._total_unrealized_profit: float = 1 self.history: dict = {} self.trade_history: list = [] + self.custom_info: dict = {} @abstractmethod def set_action_space(self): @@ -118,6 +133,19 @@ class BaseEnvironment(gym.Env): return [seed] def reset(self): + """ + Reset is called at the beginning of every episode + """ + # custom_info is used for episodic reports and tensorboard logging + self.custom_info["Invalid"] = 0 + self.custom_info["Hold"] = 0 + self.custom_info["Unknown"] = 0 + self.custom_info["pnl_factor"] = 0 + self.custom_info["duration_factor"] = 0 + self.custom_info["reward_exit"] = 0 + self.custom_info["reward_hold"] = 0 + for action in self.actions: + self.custom_info[f"{action.name}"] = 0 self._done = False @@ -271,6 +299,13 @@ class BaseEnvironment(gym.Env): def current_price(self) -> float: return self.prices.iloc[self._current_tick].open + def get_actions(self) -> Type[Enum]: + """ + Used by SubprocVecEnv to get actions from + initialized env for tensorboard callback + """ + return self.actions + # Keeping around incase we want to start building more complex environment # templates in the future. 
# def most_recent_return(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index b9b6cdd96..5e9b81108 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -12,8 +12,7 @@ import pandas as pd import torch as th import torch.multiprocessing from pandas import DataFrame -from stable_baselines3.common.callbacks import BaseCallback, EvalCallback -from stable_baselines3.common.logger import HParam +from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv @@ -22,7 +21,8 @@ from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv -from freqtrade.freqai.RL.BaseEnvironment import Positions +from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions +from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback from freqtrade.persistence import Trade @@ -45,8 +45,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] - self.train_env: Union[SubprocVecEnv, gym.Env] = None - self.eval_env: Union[SubprocVecEnv, gym.Env] = None + self.train_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env() + self.eval_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env() self.eval_callback: Optional[EvalCallback] = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] @@ -66,6 +66,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.unset_outlier_removal() self.net_arch = self.rl_config.get('net_arch', [128, 128]) self.dd.model_type = import_str + self.tensorboard_callback: TensorboardCallback = \ + TensorboardCallback(verbose=1, actions=BaseActions) def unset_outlier_removal(self): """ @@ -157,7 +159,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - self.tensorboard_callback = TensorboardCallback() + actions = self.train_env.get_actions() + self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) @abstractmethod def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): @@ -401,51 +404,3 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, return env set_random_seed(seed) return _init - - -class TensorboardCallback(BaseCallback): - """ - Custom callback for plotting additional values in tensorboard. 
- """ - def __init__(self, verbose=1): - super(TensorboardCallback, self).__init__(verbose) - - def _on_training_start(self) -> None: - hparam_dict = { - "algorithm": self.model.__class__.__name__, - "learning_rate": self.model.learning_rate, - "gamma": self.model.gamma, - "gae_lambda": self.model.gae_lambda, - "batch_size": self.model.batch_size, - "n_steps": self.model.n_steps, - } - metric_dict = { - "eval/mean_reward": 0, - "rollout/ep_rew_mean": 0, - "rollout/ep_len_mean": 0, - "train/value_loss": 0, - "train/explained_variance": 0, - } - self.logger.record( - "hparams", - HParam(hparam_dict, metric_dict), - exclude=("stdout", "log", "json", "csv"), - ) - - def _on_step(self) -> bool: - custom_info = self.training_env.get_attr("custom_info")[0] - self.logger.record("_state/position", self.locals["infos"][0]["position"]) - self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) - self.logger.record("_state/current_profit_pct", self.locals["infos"] - [0]["current_profit_pct"]) - self.logger.record("_reward/total_profit", self.locals["infos"][0]["total_profit"]) - self.logger.record("_reward/total_reward", self.locals["infos"][0]["total_reward"]) - self.logger.record_mean("_reward/mean_trade_duration", self.locals["infos"] - [0]["trade_duration"]) - self.logger.record("_actions/action", self.locals["infos"][0]["action"]) - self.logger.record("_actions/_Invalid", custom_info["Invalid"]) - self.logger.record("_actions/_Unknown", custom_info["Unknown"]) - self.logger.record("_actions/Hold", custom_info["Hold"]) - for action in Actions: - self.logger.record(f"_actions/{action.name}", custom_info[action.name]) - return True diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py new file mode 100644 index 000000000..4aea9bdf5 --- /dev/null +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -0,0 +1,61 @@ +from enum import Enum +from typing import Any, Dict, Type, Union + +from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.logger import HParam + +from freqtrade.freqai.RL.BaseEnvironment import BaseActions + + +class TensorboardCallback(BaseCallback): + """ + Custom callback for plotting additional values in tensorboard and + episodic summary reports. 
+ """ + def __init__(self, verbose=1, actions: Type[Enum] = BaseActions): + super(TensorboardCallback, self).__init__(verbose) + self.model: Any = None + # An alias for self.model.get_env(), the environment used for training + self.logger = None # type: Any + # self.training_env = None # type: Union[gym.Env, VecEnv] + self.actions: Type[Enum] = actions + + def _on_training_start(self) -> None: + hparam_dict = { + "algorithm": self.model.__class__.__name__, + "learning_rate": self.model.learning_rate, + # "gamma": self.model.gamma, + # "gae_lambda": self.model.gae_lambda, + # "batch_size": self.model.batch_size, + # "n_steps": self.model.n_steps, + } + metric_dict: Dict[str, Union[float, int]] = { + "eval/mean_reward": 0, + "rollout/ep_rew_mean": 0, + "rollout/ep_len_mean": 0, + "train/value_loss": 0, + "train/explained_variance": 0, + } + self.logger.record( + "hparams", + HParam(hparam_dict, metric_dict), + exclude=("stdout", "log", "json", "csv"), + ) + + def _on_step(self) -> bool: + custom_info = self.training_env.get_attr("custom_info")[0] # type: ignore + self.logger.record("_state/position", self.locals["infos"][0]["position"]) + self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record("_state/current_profit_pct", self.locals["infos"] + [0]["current_profit_pct"]) + self.logger.record("_reward/total_profit", self.locals["infos"][0]["total_profit"]) + self.logger.record("_reward/total_reward", self.locals["infos"][0]["total_reward"]) + self.logger.record_mean("_reward/mean_trade_duration", self.locals["infos"] + [0]["trade_duration"]) + self.logger.record("_actions/action", self.locals["infos"][0]["action"]) + self.logger.record("_actions/_Invalid", custom_info["Invalid"]) + self.logger.record("_actions/_Unknown", custom_info["Unknown"]) + self.logger.record("_actions/Hold", custom_info["Hold"]) + for action in self.actions: + self.logger.record(f"_actions/{action.name}", custom_info[action.name]) + return True diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index fa1087497..47dbaf99e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -88,33 +88,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): User can override any function in BaseRLEnv and gym.Env. Here the user sets a custom reward based on profit and trade duration. 
""" - def reset(self): - - # Reset custom info - self.custom_info = {} - self.custom_info["Invalid"] = 0 - self.custom_info["Hold"] = 0 - self.custom_info["Unknown"] = 0 - self.custom_info["pnl_factor"] = 0 - self.custom_info["duration_factor"] = 0 - self.custom_info["reward_exit"] = 0 - self.custom_info["reward_hold"] = 0 - for action in Actions: - self.custom_info[f"{action.name}"] = 0 - return super().reset() - - def step(self, action: int): - observation, step_reward, done, info = super().step(action) - info = dict( - tick=self._current_tick, - action=action, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value, - trade_duration=self.get_trade_duration(), - current_profit_pct=self.get_unrealized_profit() - ) - return observation, step_reward, done, info def calculate_reward(self, action: int) -> float: """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index dd5430aa7..32a2a2076 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -1,14 +1,14 @@ import logging -from typing import Any, Dict # , Tuple +from typing import Any, Dict -# import numpy.typing as npt from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner -from freqtrade.freqai.RL.BaseReinforcementLearningModel import TensorboardCallback, make_env +from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env +from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback logger = logging.getLogger(__name__) @@ -50,4 +50,5 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - self.tensorboard_callback = TensorboardCallback() + actions = self.train_env.env_method("get_actions")[0] + self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) From d8565261e1880f0458356fa2dc477ea487a56c0e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Dec 2022 14:10:33 +0100 Subject: [PATCH 259/421] ignore initializer type --- freqtrade/freqai/RL/BaseEnvironment.py | 1 + freqtrade/freqai/RL/TensorboardCallback.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 3fca6a25d..e43951142 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -77,6 +77,7 @@ class BaseEnvironment(gym.Env): # set here to default 5Ac, but all children envs can overwrite this self.actions: Type[Enum] = BaseActions + self.custom_info: dict = {} def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py index 4aea9bdf5..b5b8ba23d 100644 --- a/freqtrade/freqai/RL/TensorboardCallback.py +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -4,7 +4,7 @@ from typing import Any, Dict, Type, Union from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.logger import HParam -from freqtrade.freqai.RL.BaseEnvironment import 
BaseActions +from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment class TensorboardCallback(BaseCallback): @@ -15,9 +15,8 @@ class TensorboardCallback(BaseCallback): def __init__(self, verbose=1, actions: Type[Enum] = BaseActions): super(TensorboardCallback, self).__init__(verbose) self.model: Any = None - # An alias for self.model.get_env(), the environment used for training self.logger = None # type: Any - # self.training_env = None # type: Union[gym.Env, VecEnv] + self.training_env: BaseEnvironment = None # type: ignore self.actions: Type[Enum] = actions def _on_training_start(self) -> None: @@ -43,7 +42,7 @@ class TensorboardCallback(BaseCallback): ) def _on_step(self) -> bool: - custom_info = self.training_env.get_attr("custom_info")[0] # type: ignore + custom_info = self.training_env.custom_info self.logger.record("_state/position", self.locals["infos"][0]["position"]) self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) self.logger.record("_state/current_profit_pct", self.locals["infos"] From 133a081a394828f1b45e0b3c90223adca96388de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:00:39 +0000 Subject: [PATCH 260/421] Bump pytest-random-order from 1.0.4 to 1.1.0 Bumps [pytest-random-order](https://github.com/jbasko/pytest-random-order) from 1.0.4 to 1.1.0. - [Release notes](https://github.com/jbasko/pytest-random-order/releases) - [Commits](https://github.com/jbasko/pytest-random-order/compare/v1.0.4...v1.1.0) --- updated-dependencies: - dependency-name: pytest-random-order dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index ffce3d696..463d2656a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -15,7 +15,7 @@ pytest==7.2.0 pytest-asyncio==0.20.2 pytest-cov==4.0.0 pytest-mock==3.10.0 -pytest-random-order==1.0.4 +pytest-random-order==1.1.0 isort==5.10.1 # For datetime mocking time-machine==2.8.2 From 16bad8dca6235a2d7272b753b4f3fa35698f61de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:00:56 +0000 Subject: [PATCH 261/421] Bump pymdown-extensions from 9.8 to 9.9 Bumps [pymdown-extensions](https://github.com/facelessuser/pymdown-extensions) from 9.8 to 9.9. - [Release notes](https://github.com/facelessuser/pymdown-extensions/releases) - [Commits](https://github.com/facelessuser/pymdown-extensions/compare/9.8...9.9) --- updated-dependencies: - dependency-name: pymdown-extensions dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- docs/requirements-docs.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index 224e9b548..7a2c806ca 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -2,5 +2,5 @@ markdown==3.3.7 mkdocs==1.4.2 mkdocs-material==8.5.10 mdx_truly_sane_lists==1.3 -pymdown-extensions==9.8 +pymdown-extensions==9.9 jinja2==3.1.2 From 441069f36390ad90227648fbb7a69b760b97e04e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:01:03 +0000 Subject: [PATCH 262/421] Bump pandas from 1.5.1 to 1.5.2 Bumps [pandas](https://github.com/pandas-dev/pandas) from 1.5.1 to 1.5.2. - [Release notes](https://github.com/pandas-dev/pandas/releases) - [Changelog](https://github.com/pandas-dev/pandas/blob/main/RELEASE.md) - [Commits](https://github.com/pandas-dev/pandas/compare/v1.5.1...v1.5.2) --- updated-dependencies: - dependency-name: pandas dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dab8ae414..755c96da2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ numpy==1.23.5 -pandas==1.5.1 +pandas==1.5.2 pandas-ta==0.3.14b ccxt==2.2.36 From caae4441e5a5b604cd5d8f5994dfd80ef3a71a7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:01:12 +0000 Subject: [PATCH 263/421] Bump jsonschema from 4.17.1 to 4.17.3 Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.17.1 to 4.17.3. - [Release notes](https://github.com/python-jsonschema/jsonschema/releases) - [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst) - [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.17.1...v4.17.3) --- updated-dependencies: - dependency-name: jsonschema dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dab8ae414..62887f995 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 urllib3==1.26.13 -jsonschema==4.17.1 +jsonschema==4.17.3 TA-Lib==0.4.25 technical==1.3.0 tabulate==0.9.0 From 66bb2c52532167b9f64aadc7ca9cb739b1980119 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:01:16 +0000 Subject: [PATCH 264/421] Bump fastapi from 0.87.0 to 0.88.0 Bumps [fastapi](https://github.com/tiangolo/fastapi) from 0.87.0 to 0.88.0. - [Release notes](https://github.com/tiangolo/fastapi/releases) - [Commits](https://github.com/tiangolo/fastapi/compare/0.87.0...0.88.0) --- updated-dependencies: - dependency-name: fastapi dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dab8ae414..d9c479444 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,7 +36,7 @@ orjson==3.8.2 sdnotify==0.3.2 # API Server -fastapi==0.87.0 +fastapi==0.88.0 pydantic==1.10.2 uvicorn==0.20.0 pyjwt==2.6.0 From 2eb8f9f0282ca0f9f30731265f667c31521760bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:03:47 +0000 Subject: [PATCH 265/421] Bump pypa/gh-action-pypi-publish from 1.5.1 to 1.6.1 Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.5.1 to 1.6.1. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.5.1...v1.6.1) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e730d1489..273fb7ea0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -410,7 +410,7 @@ jobs: python setup.py sdist bdist_wheel - name: Publish to PyPI (Test) - uses: pypa/gh-action-pypi-publish@v1.5.1 + uses: pypa/gh-action-pypi-publish@v1.6.1 if: (github.event_name == 'release') with: user: __token__ @@ -418,7 +418,7 @@ jobs: repository_url: https://test.pypi.org/legacy/ - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.5.1 + uses: pypa/gh-action-pypi-publish@v1.6.1 if: (github.event_name == 'release') with: user: __token__ From 82d4dca1832ac306ea95dfdda52cd21d9cf8a68a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 05:21:15 +0000 Subject: [PATCH 266/421] Bump mkdocs-material from 8.5.10 to 8.5.11 Bumps [mkdocs-material](https://github.com/squidfunk/mkdocs-material) from 8.5.10 to 8.5.11. - [Release notes](https://github.com/squidfunk/mkdocs-material/releases) - [Changelog](https://github.com/squidfunk/mkdocs-material/blob/master/CHANGELOG) - [Commits](https://github.com/squidfunk/mkdocs-material/compare/8.5.10...8.5.11) --- updated-dependencies: - dependency-name: mkdocs-material dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- docs/requirements-docs.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index 7a2c806ca..fd4f66d71 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -1,6 +1,6 @@ markdown==3.3.7 mkdocs==1.4.2 -mkdocs-material==8.5.10 +mkdocs-material==8.5.11 mdx_truly_sane_lists==1.3 pymdown-extensions==9.9 jinja2==3.1.2 From 179adea0e221eb2c85607a6a2e7b2a34e9116640 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 05:22:42 +0000 Subject: [PATCH 267/421] Bump ccxt from 2.2.36 to 2.2.67 Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.2.36 to 2.2.67. 
- [Release notes](https://github.com/ccxt/ccxt/releases) - [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg) - [Commits](https://github.com/ccxt/ccxt/compare/2.2.36...2.2.67) --- updated-dependencies: - dependency-name: ccxt dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1e9c5030e..99cdca11e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.23.5 pandas==1.5.2 pandas-ta==0.3.14b -ccxt==2.2.36 +ccxt==2.2.67 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.4; platform_machine != 'armv7l' From 102ab91fa44ee1873f844b9fb9fe36e914910aef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 05:23:02 +0000 Subject: [PATCH 268/421] Bump orjson from 3.8.2 to 3.8.3 Bumps [orjson](https://github.com/ijl/orjson) from 3.8.2 to 3.8.3. - [Release notes](https://github.com/ijl/orjson/releases) - [Changelog](https://github.com/ijl/orjson/blob/master/CHANGELOG.md) - [Commits](https://github.com/ijl/orjson/compare/3.8.2...3.8.3) --- updated-dependencies: - dependency-name: orjson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1e9c5030e..51d3f9dde 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ py_find_1st==1.1.5 # Load ticker files 30% faster python-rapidjson==1.9 # Properly format api responses -orjson==3.8.2 +orjson==3.8.3 # Notify systemd sdnotify==0.3.2 From e734b399296cd88e77d6962281f13f49a9a9b016 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 5 Dec 2022 14:54:42 +0300 Subject: [PATCH 269/421] Make model_training_parameters optional --- config_examples/config_freqai.example.json | 4 +--- docs/freqai-configuration.md | 11 ++++------- freqtrade/constants.py | 5 ++--- .../freqai/prediction_models/ReinforcementLearner.py | 2 +- 4 files changed, 8 insertions(+), 14 deletions(-) diff --git a/config_examples/config_freqai.example.json b/config_examples/config_freqai.example.json index 5e564a1fc..dfd54b3d9 100644 --- a/config_examples/config_freqai.example.json +++ b/config_examples/config_freqai.example.json @@ -79,9 +79,7 @@ "test_size": 0.33, "random_state": 1 }, - "model_training_parameters": { - "n_estimators": 1000 - } + "model_training_parameters": {} }, "bot_name": "", "force_entry_enable": true, diff --git a/docs/freqai-configuration.md b/docs/freqai-configuration.md index 5c3bbf90c..10f5838c9 100644 --- a/docs/freqai-configuration.md +++ b/docs/freqai-configuration.md @@ -26,10 +26,7 @@ FreqAI is configured through the typical [Freqtrade config file](configuration.m }, "data_split_parameters" : { "test_size": 0.25 - }, - "model_training_parameters" : { - "n_estimators": 100 - }, + } } ``` @@ -118,7 +115,7 @@ The FreqAI strategy requires including the following lines of code in the standa ``` -Notice how the `populate_any_indicators()` is where [features](freqai-feature-engineering.md#feature-engineering) and labels/targets are added. A full example strategy is available in `templates/FreqaiExampleStrategy.py`. 
+Notice how the `populate_any_indicators()` is where [features](freqai-feature-engineering.md#feature-engineering) and labels/targets are added. A full example strategy is available in `templates/FreqaiExampleStrategy.py`. Notice also the location of the labels under `if set_generalized_indicators:` at the bottom of the example. This is where single features and labels/targets should be added to the feature set to avoid duplication of them from various configuration parameters that multiply the feature set, such as `include_timeframes`. @@ -182,7 +179,7 @@ The `startup_candle_count` in the FreqAI strategy needs to be set up in the same ## Creating a dynamic target threshold -Deciding when to enter or exit a trade can be done in a dynamic way to reflect current market conditions. FreqAI allows you to return additional information from the training of a model (more info [here](freqai-feature-engineering.md#returning-additional-info-from-training)). For example, the `&*_std/mean` return values describe the statistical distribution of the target/label *during the most recent training*. Comparing a given prediction to these values allows you to know the rarity of the prediction. In `templates/FreqaiExampleStrategy.py`, the `target_roi` and `sell_roi` are defined to be 1.25 z-scores away from the mean which causes predictions that are closer to the mean to be filtered out. +Deciding when to enter or exit a trade can be done in a dynamic way to reflect current market conditions. FreqAI allows you to return additional information from the training of a model (more info [here](freqai-feature-engineering.md#returning-additional-info-from-training)). For example, the `&*_std/mean` return values describe the statistical distribution of the target/label *during the most recent training*. Comparing a given prediction to these values allows you to know the rarity of the prediction. In `templates/FreqaiExampleStrategy.py`, the `target_roi` and `sell_roi` are defined to be 1.25 z-scores away from the mean which causes predictions that are closer to the mean to be filtered out. ```python dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25 @@ -230,7 +227,7 @@ If you want to predict multiple targets, you need to define multiple labels usin #### Classifiers -If you are using a classifier, you need to specify a target that has discrete values. FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elects to use a classifier, the classes need to be set using strings. For example, if you want to predict if the price 100 candles into the future goes up or down you would set +If you are using a classifier, you need to specify a target that has discrete values. FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elects to use a classifier, the classes need to be set using strings. 
For example, if you want to predict if the price 100 candles into the future goes up or down you would set ```python df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down') diff --git a/freqtrade/constants.py b/freqtrade/constants.py index d869b89f6..ca1be1d6a 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -608,9 +608,8 @@ CONF_SCHEMA = { "backtest_period_days", "identifier", "feature_parameters", - "data_split_parameters", - "model_training_parameters" - ] + "data_split_parameters" + ] }, }, } diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 61b01e21b..39901859c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -61,7 +61,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, tensorboard_log=Path( dk.full_path / "tensorboard" / dk.pair.split('/')[0]), - **self.freqai_info['model_training_parameters'] + **self.freqai_info.get('model_training_parameters', {}) ) else: logger.info('Continual training activated - starting training from previously ' From 730fba956b55b67555bf5766532faf7ddc8ba856 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 5 Dec 2022 16:16:17 +0300 Subject: [PATCH 270/421] Ensure base tf included in include_timeframes --- freqtrade/freqai/utils.py | 20 ++++++++++++++++++++ freqtrade/strategy/interface.py | 4 +++- tests/freqai/test_freqai_interface.py | 17 ++++++++++++++++- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index 806e3ca15..7a9d3df76 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -233,3 +233,23 @@ def get_timerange_backtest_live_models(config: Config) -> str: dd = FreqaiDataDrawer(models_path, config) timerange = dd.get_timerange_from_live_historic_predictions() return timerange.timerange_str + + +def ensure_base_tf_in_include_timeframes(config: Config) -> Config: + """ + Ensure that the base timeframe is included in the include_timeframes list + :param config: Configuration dictionary + + :return config: Configuration dictionary + """ + feature_parameters = config.get('freqai', {}).get('feature_parameters', {}) + include_timeframes = feature_parameters.get('include_timeframes', []) + + if config['timeframe'] in include_timeframes: + return config + + include_timeframes = [config['timeframe']] + include_timeframes + config.get('freqai', {}).get('feature_parameters', {}) \ + .update({**feature_parameters, 'include_timeframes': include_timeframes}) + + return config diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 681c5fcbb..48a03e216 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -148,9 +148,11 @@ class IStrategy(ABC, HyperStrategyMixin): def load_freqAI_model(self) -> None: if self.config.get('freqai', {}).get('enabled', False): # Import here to avoid importing this if freqAI is disabled - from freqtrade.freqai.utils import download_all_data_for_training + from freqtrade.freqai.utils import (download_all_data_for_training, + ensure_base_tf_in_include_timeframes) from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver self.freqai = FreqaiModelResolver.load_freqaimodel(self.config) + self.config = ensure_base_tf_in_include_timeframes(self.config) 
self.freqai_info = self.config["freqai"] # download the desired data in dry/live diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index c53137093..6f01c66f6 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -9,7 +9,9 @@ from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange +from freqtrade.freqai.utils import (download_all_data_for_training, + ensure_base_tf_in_include_timeframes, + get_required_data_timerange) from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager @@ -528,6 +530,19 @@ def test_start_set_train_queue(mocker, freqai_conf, caplog): ) +def test_base_tf_in_include_timeframes(mocker, freqai_conf): + freqai_conf['timeframe'] = '5m' + freqai_conf['freqai']['feature_parameters'].update({ + 'include_timeframes': ['15m', '1h'] + }) + updated_conf = ensure_base_tf_in_include_timeframes(freqai_conf) + assert updated_conf['freqai']['feature_parameters']['include_timeframes'] == [ + '5m', '15m', '1h', + ] + last_conf = ensure_base_tf_in_include_timeframes(updated_conf) + assert last_conf == updated_conf + + def test_get_required_data_timerange(mocker, freqai_conf): time_range = get_required_data_timerange(freqai_conf) assert (time_range.stopts - time_range.startts) == 177300 From 189fa64052b0261a272765ed799941e8e001ef4a Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 5 Dec 2022 18:14:16 +0100 Subject: [PATCH 271/421] Add more dynamic to directory change --- docs/strategy_analysis_example.md | 33 +++++++++++++-- .../templates/strategy_analysis_example.ipynb | 40 ++++++++++++++----- 2 files changed, 59 insertions(+), 14 deletions(-) diff --git a/docs/strategy_analysis_example.md b/docs/strategy_analysis_example.md index bae4a9108..e3d2870e2 100644 --- a/docs/strategy_analysis_example.md +++ b/docs/strategy_analysis_example.md @@ -2,12 +2,37 @@ Debugging a strategy can be time-consuming. Freqtrade offers helper functions to visualize raw data. The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location. +Please follow the [documentation](https://www.freqtrade.io/en/stable/data-download/) for more details. ## Setup +### Change Working directory to repository root + ```python +import os from pathlib import Path + +# Change directory +# Modify this cell to insure that the output shows the correct path. +# Define all paths relative to the project root shown in the cell output +project_root = "somedir/freqtrade" +i=0 +try: + os.chdirdir(project_root) + assert Path('LICENSE').is_file() +except: + while i<4 and (not Path('LICENSE').is_file()): + os.chdir(Path(Path.cwd(), '../')) + i+=1 + project_root = Path.cwd() +print(Path.cwd()) +``` + +### Configure Freqtrade environment + + +```python from freqtrade.configuration import Configuration # Customize these according to your needs. 
@@ -15,14 +40,14 @@ from freqtrade.configuration import Configuration # Initialize empty configuration object config = Configuration.from_files([]) # Optionally (recommended), use existing configuration file -# config = Configuration.from_files(["config.json"]) +# config = Configuration.from_files(["user_data/config.json"]) # Define some constants config["timeframe"] = "5m" # Name of the strategy class config["strategy"] = "SampleStrategy" # Location of the data -data_location = config['datadir'] +data_location = config["datadir"] # Pair to analyze - Only use one pair here pair = "BTC/USDT" ``` @@ -36,12 +61,12 @@ from freqtrade.enums import CandleType candles = load_pair_history(datadir=data_location, timeframe=config["timeframe"], pair=pair, - data_format = "hdf5", + data_format = "json", # Make sure to update this to your data candle_type=CandleType.SPOT, ) # Confirm success -print("Loaded " + str(len(candles)) + f" rows of data for {pair} from {data_location}") +print(f"Loaded {len(candles)} rows of data for {pair} from {data_location}") candles.head() ``` diff --git a/freqtrade/templates/strategy_analysis_example.ipynb b/freqtrade/templates/strategy_analysis_example.ipynb index f7d68b41c..dfbcedb72 100644 --- a/freqtrade/templates/strategy_analysis_example.ipynb +++ b/freqtrade/templates/strategy_analysis_example.ipynb @@ -7,14 +7,17 @@ "# Strategy analysis example\n", "\n", "Debugging a strategy can be time-consuming. Freqtrade offers helper functions to visualize raw data.\n", - "The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location, using command like `freqtrade download-data --exchange binance --trading-mod spot --pairs BTC/USDT --days 7 -t 5m`." + "The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location.\n", + "Please follow the [documentation](https://www.freqtrade.io/en/stable/data-download/) for more details." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Setup" + "## Setup\n", + "\n", + "### Change Working directory to repository root" ] }, { @@ -26,12 +29,29 @@ "import os\n", "from pathlib import Path\n", "\n", - "# Change current working directory from `somedir/freqtrade/user_data/notebooks` to project root `somedir/freqtrade`, so relative paths remain consistent.\n", - "if not Path(\"LICENSE\").is_file():\n", - " os.chdir(\"../../\")\n", + "# Change directory\n", + "# Modify this cell to insure that the output shows the correct path.\n", + "# Define all paths relative to the project root shown in the cell output\n", + "project_root = \"somedir/freqtrade\"\n", + "i=0\n", + "try:\n", + " os.chdirdir(project_root)\n", + " assert Path('LICENSE').is_file()\n", + "except:\n", + " while i<4 and (not Path('LICENSE').is_file()):\n", + " os.chdir(Path(Path.cwd(), '../'))\n", + " i+=1\n", + " project_root = Path.cwd()\n", "print(Path.cwd())" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configure Freqtrade environment" + ] + }, { "cell_type": "code", "execution_count": null, @@ -70,12 +90,12 @@ "candles = load_pair_history(datadir=data_location,\n", " timeframe=config[\"timeframe\"],\n", " pair=pair,\n", - " data_format = \"json\",\n", + " data_format = \"json\", # Make sure to update this to your data\n", " candle_type=CandleType.SPOT,\n", " )\n", "\n", "# Confirm success\n", - "print(\"Loaded \" + str(len(candles)) + f\" rows of data for {pair} from {data_location}\")\n", + "print(f\"Loaded {len(candles)} rows of data for {pair} from {data_location}\")\n", "candles.head()" ] }, @@ -379,7 +399,7 @@ "metadata": { "file_extension": ".py", "kernelspec": { - "display_name": "Python 3.11.0 64-bit", + "display_name": "Python 3.9.7 64-bit", "language": "python", "name": "python3" }, @@ -393,7 +413,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.0" + "version": "3.9.7" }, "mimetype": "text/x-python", "name": "python", @@ -444,7 +464,7 @@ "version": 3, "vscode": { "interpreter": { - "hash": "945ba00099661281427cc644a7000ee9eeea5ce6ad3bf937939d3d384b8f3881" + "hash": "675f32a300d6d26767470181ad0b11dd4676bcce7ed1dd2ffe2fbc370c95fc7c" } } }, From 5e533b550f777e6e898091cc86f354ab7ef91a48 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 26 Oct 2022 20:22:58 +0200 Subject: [PATCH 272/421] Emit a simple "new candle" message to allow UI's to refresh charts --- freqtrade/data/dataprovider.py | 18 ++++++++++++------ freqtrade/enums/rpcmessagetype.py | 1 + freqtrade/rpc/webhook.py | 1 + freqtrade/strategy/interface.py | 6 +++--- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 4d7296ee7..7657549a3 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -101,16 +101,13 @@ class DataProvider: """ return self.__producer_pairs.get(producer_name, []).copy() - def _emit_df( - self, - pair_key: PairWithTimeframe, - dataframe: DataFrame - ) -> None: + def _emit_df(self, pair_key: PairWithTimeframe, dataframe: DataFrame, new_candle: bool) -> None: """ Send this dataframe as an ANALYZED_DF message to RPC :param pair_key: PairWithTimeframe tuple - :param data: Tuple containing the DataFrame and the datetime it was cached + :param dataframe: Dataframe to emit + :param new_candle: This is a new candle """ if self.__rpc: self.__rpc.send_msg( @@ -123,6 +120,15 @@ class DataProvider: } } ) + if new_candle: + 
self.__rpc.send_msg( + { + 'type': RPCMessageType.NEW_CANDLE, + 'data': { + 'key': pair_key, + } + } + ) def _add_external_df( self, diff --git a/freqtrade/enums/rpcmessagetype.py b/freqtrade/enums/rpcmessagetype.py index fae121a09..8b3596465 100644 --- a/freqtrade/enums/rpcmessagetype.py +++ b/freqtrade/enums/rpcmessagetype.py @@ -21,6 +21,7 @@ class RPCMessageType(str, Enum): WHITELIST = 'whitelist' ANALYZED_DF = 'analyzed_df' + NEW_CANDLE = 'new_candle' def __repr__(self): return self.value diff --git a/freqtrade/rpc/webhook.py b/freqtrade/rpc/webhook.py index 19c4166b3..d81d8d24f 100644 --- a/freqtrade/rpc/webhook.py +++ b/freqtrade/rpc/webhook.py @@ -68,6 +68,7 @@ class Webhook(RPCHandler): RPCMessageType.PROTECTION_TRIGGER_GLOBAL, RPCMessageType.WHITELIST, RPCMessageType.ANALYZED_DF, + RPCMessageType.NEW_CANDLE, RPCMessageType.STRATEGY_MSG): # Don't fail for non-implemented types return None diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 681c5fcbb..781ae6c5c 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -739,10 +739,10 @@ class IStrategy(ABC, HyperStrategyMixin): """ pair = str(metadata.get('pair')) + new_candle = self._last_candle_seen_per_pair.get(pair, None) != dataframe.iloc[-1]['date'] # Test if seen this pair and last candle before. # always run if process_only_new_candles is set to false - if (not self.process_only_new_candles or - self._last_candle_seen_per_pair.get(pair, None) != dataframe.iloc[-1]['date']): + if not self.process_only_new_candles or new_candle: # Defs that only make change on new candle data. dataframe = self.analyze_ticker(dataframe, metadata) @@ -751,7 +751,7 @@ class IStrategy(ABC, HyperStrategyMixin): candle_type = self.config.get('candle_type_def', CandleType.SPOT) self.dp._set_cached_df(pair, self.timeframe, dataframe, candle_type=candle_type) - self.dp._emit_df((pair, self.timeframe, candle_type), dataframe) + self.dp._emit_df((pair, self.timeframe, candle_type), dataframe, new_candle) else: logger.debug("Skipping TA Analysis for already analyzed candle") From 687eefa06e1318b41e55a3aee5eb4e3cb3d33df4 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 26 Oct 2022 20:31:31 +0200 Subject: [PATCH 273/421] Improve emit_df testcase --- tests/data/test_dataprovider.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/data/test_dataprovider.py b/tests/data/test_dataprovider.py index 8500fa06c..025e6d08a 100644 --- a/tests/data/test_dataprovider.py +++ b/tests/data/test_dataprovider.py @@ -207,12 +207,18 @@ def test_emit_df(mocker, default_conf, ohlcv_history): assert send_mock.call_count == 0 # Rpc is added, we call emit, should call send_msg - dataprovider._emit_df(pair, ohlcv_history) + dataprovider._emit_df(pair, ohlcv_history, False) assert send_mock.call_count == 1 + send_mock.reset_mock() + dataprovider._emit_df(pair, ohlcv_history, True) + assert send_mock.call_count == 2 + + send_mock.reset_mock() + # No rpc added, emit called, should not call send_msg - dataprovider_no_rpc._emit_df(pair, ohlcv_history) - assert send_mock.call_count == 1 + dataprovider_no_rpc._emit_df(pair, ohlcv_history, False) + assert send_mock.call_count == 0 def test_refresh(mocker, default_conf, ohlcv_history): From d30a872ed48c6a72acdfc23ff64d045c2d1f33d0 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 5 Dec 2022 19:23:03 +0100 Subject: [PATCH 274/421] Move message-silencing list next to enum --- freqtrade/enums/__init__.py | 2 +- freqtrade/enums/rpcmessagetype.py 
| 3 +++ freqtrade/rpc/rpc_manager.py | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/freqtrade/enums/__init__.py b/freqtrade/enums/__init__.py index 146d65f2d..eb70a2894 100644 --- a/freqtrade/enums/__init__.py +++ b/freqtrade/enums/__init__.py @@ -6,7 +6,7 @@ from freqtrade.enums.exittype import ExitType from freqtrade.enums.hyperoptstate import HyperoptState from freqtrade.enums.marginmode import MarginMode from freqtrade.enums.ordertypevalue import OrderTypeValues -from freqtrade.enums.rpcmessagetype import RPCMessageType, RPCRequestType +from freqtrade.enums.rpcmessagetype import NO_ECHO_MESSAGES, RPCMessageType, RPCRequestType from freqtrade.enums.runmode import NON_UTIL_MODES, OPTIMIZE_MODES, TRADING_MODES, RunMode from freqtrade.enums.signaltype import SignalDirection, SignalTagType, SignalType from freqtrade.enums.state import State diff --git a/freqtrade/enums/rpcmessagetype.py b/freqtrade/enums/rpcmessagetype.py index 8b3596465..2453d16d9 100644 --- a/freqtrade/enums/rpcmessagetype.py +++ b/freqtrade/enums/rpcmessagetype.py @@ -36,3 +36,6 @@ class RPCRequestType(str, Enum): WHITELIST = 'whitelist' ANALYZED_DF = 'analyzed_df' + + +NO_ECHO_MESSAGES = (RPCMessageType.ANALYZED_DF, RPCMessageType.WHITELIST, RPCMessageType.NEW_CANDLE) diff --git a/freqtrade/rpc/rpc_manager.py b/freqtrade/rpc/rpc_manager.py index 9c25723b0..c4d4fa2dd 100644 --- a/freqtrade/rpc/rpc_manager.py +++ b/freqtrade/rpc/rpc_manager.py @@ -6,7 +6,7 @@ from collections import deque from typing import Any, Dict, List from freqtrade.constants import Config -from freqtrade.enums import RPCMessageType +from freqtrade.enums import NO_ECHO_MESSAGES, RPCMessageType from freqtrade.rpc import RPC, RPCHandler @@ -67,7 +67,7 @@ class RPCManager: 'status': 'stopping bot' } """ - if msg.get('type') not in (RPCMessageType.ANALYZED_DF, RPCMessageType.WHITELIST): + if msg.get('type') not in NO_ECHO_MESSAGES: logger.info('Sending rpc message: %s', msg) if 'pair' in msg: msg.update({ From 24edc276ea31d4e733667155aecc8e403a43f7f2 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 5 Dec 2022 19:43:36 +0100 Subject: [PATCH 275/421] Simplify new_candle message --- freqtrade/data/dataprovider.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 7657549a3..6b220c8b4 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -101,7 +101,12 @@ class DataProvider: """ return self.__producer_pairs.get(producer_name, []).copy() - def _emit_df(self, pair_key: PairWithTimeframe, dataframe: DataFrame, new_candle: bool) -> None: + def _emit_df( + self, + pair_key: PairWithTimeframe, + dataframe: DataFrame, + new_candle: bool + ) -> None: """ Send this dataframe as an ANALYZED_DF message to RPC @@ -121,14 +126,10 @@ class DataProvider: } ) if new_candle: - self.__rpc.send_msg( - { + self.__rpc.send_msg({ 'type': RPCMessageType.NEW_CANDLE, - 'data': { - 'key': pair_key, - } - } - ) + 'data': pair_key, + }) def _add_external_df( self, From 7c27eedda54e36c06471559a1dcf92bb98248405 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 5 Dec 2022 19:56:33 +0100 Subject: [PATCH 276/421] Bump API version --- freqtrade/rpc/api_server/api_v1.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/api_v1.py b/freqtrade/rpc/api_server/api_v1.py index c0c9b8f57..9e4b140e4 100644 --- a/freqtrade/rpc/api_server/api_v1.py +++ b/freqtrade/rpc/api_server/api_v1.py @@ -37,7 +37,8 
@@ logger = logging.getLogger(__name__) # 2.16: Additional daily metrics # 2.17: Forceentry - leverage, partial force_exit # 2.20: Add websocket endpoints -API_VERSION = 2.20 +# 2.21: Add new_candle messagetype +API_VERSION = 2.21 # Public API, requires no auth. router_public = APIRouter() From 72472587ddb4251270502e19f2808493c53cb2ca Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 5 Dec 2022 20:18:47 +0100 Subject: [PATCH 277/421] Increase test range for api version test --- tests/rpc/test_rpc_apiserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 043666853..ee067f911 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -588,7 +588,7 @@ def test_api_show_config(botclient): assert 'unfilledtimeout' in response assert 'version' in response assert 'api_version' in response - assert 2.1 <= response['api_version'] <= 2.2 + assert 2.1 <= response['api_version'] < 3.0 def test_api_daily(botclient, mocker, ticker, fee, markets): From 62c69bf2b5285196ce80760160712c04b339bad1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 5 Dec 2022 20:22:54 +0100 Subject: [PATCH 278/421] fix custom_info --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 4 ++-- freqtrade/freqai/RL/BaseEnvironment.py | 3 +-- freqtrade/freqai/RL/TensorboardCallback.py | 2 +- tests/freqai/test_freqai_interface.py | 1 - 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 7818ac51e..79616d778 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -20,8 +20,8 @@ class Base4ActionRLEnv(BaseEnvironment): """ Base class for a 4 action environment """ - def __init__(self, *args): - super().__init__(*args) + def __init__(self, **kwargs): + super().__init__(**kwargs) self.actions = Actions def set_action_space(self): diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index e43951142..a31ded0c6 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -75,7 +75,7 @@ class BaseEnvironment(gym.Env): else: self.fee = 0.0015 - # set here to default 5Ac, but all children envs can overwrite this + # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions self.custom_info: dict = {} @@ -121,7 +121,6 @@ class BaseEnvironment(gym.Env): self._total_unrealized_profit: float = 1 self.history: dict = {} self.trade_history: list = [] - self.custom_info: dict = {} @abstractmethod def set_action_space(self): diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py index b5b8ba23d..f590bdf84 100644 --- a/freqtrade/freqai/RL/TensorboardCallback.py +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -42,7 +42,7 @@ class TensorboardCallback(BaseCallback): ) def _on_step(self) -> bool: - custom_info = self.training_env.custom_info + custom_info = self.training_env.get_attr("custom_info")[0] self.logger.record("_state/position", self.locals["infos"][0]["position"]) self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) self.logger.record("_state/current_profit_pct", self.locals["infos"] diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index c53137093..f19acb018 100644 --- a/tests/freqai/test_freqai_interface.py +++ 
b/tests/freqai/test_freqai_interface.py @@ -237,7 +237,6 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) df = freqai.cache_corr_pairlist_dfs(df, freqai.dk) for i in range(5): df[f'%-constant_{i}'] = i - # df.loc[:, f'%-constant_{i}'] = i metadata = {"pair": "LTC/BTC"} freqai.start_backtesting(df, metadata, freqai.dk) From bc48099e48333d5c657fcbb11831ea8cd1700697 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 5 Dec 2022 23:52:48 +0300 Subject: [PATCH 279/421] Revert changes --- freqtrade/freqai/utils.py | 20 -------------------- freqtrade/strategy/interface.py | 4 +--- tests/freqai/test_freqai_interface.py | 17 +---------------- 3 files changed, 2 insertions(+), 39 deletions(-) diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index 7a9d3df76..806e3ca15 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -233,23 +233,3 @@ def get_timerange_backtest_live_models(config: Config) -> str: dd = FreqaiDataDrawer(models_path, config) timerange = dd.get_timerange_from_live_historic_predictions() return timerange.timerange_str - - -def ensure_base_tf_in_include_timeframes(config: Config) -> Config: - """ - Ensure that the base timeframe is included in the include_timeframes list - :param config: Configuration dictionary - - :return config: Configuration dictionary - """ - feature_parameters = config.get('freqai', {}).get('feature_parameters', {}) - include_timeframes = feature_parameters.get('include_timeframes', []) - - if config['timeframe'] in include_timeframes: - return config - - include_timeframes = [config['timeframe']] + include_timeframes - config.get('freqai', {}).get('feature_parameters', {}) \ - .update({**feature_parameters, 'include_timeframes': include_timeframes}) - - return config diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 48a03e216..681c5fcbb 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -148,11 +148,9 @@ class IStrategy(ABC, HyperStrategyMixin): def load_freqAI_model(self) -> None: if self.config.get('freqai', {}).get('enabled', False): # Import here to avoid importing this if freqAI is disabled - from freqtrade.freqai.utils import (download_all_data_for_training, - ensure_base_tf_in_include_timeframes) + from freqtrade.freqai.utils import download_all_data_for_training from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver self.freqai = FreqaiModelResolver.load_freqaimodel(self.config) - self.config = ensure_base_tf_in_include_timeframes(self.config) self.freqai_info = self.config["freqai"] # download the desired data in dry/live diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 6f01c66f6..c53137093 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -9,9 +9,7 @@ from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.utils import (download_all_data_for_training, - ensure_base_tf_in_include_timeframes, - get_required_data_timerange) +from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager @@ -530,19 +528,6 @@ def test_start_set_train_queue(mocker, freqai_conf, 
caplog): ) -def test_base_tf_in_include_timeframes(mocker, freqai_conf): - freqai_conf['timeframe'] = '5m' - freqai_conf['freqai']['feature_parameters'].update({ - 'include_timeframes': ['15m', '1h'] - }) - updated_conf = ensure_base_tf_in_include_timeframes(freqai_conf) - assert updated_conf['freqai']['feature_parameters']['include_timeframes'] == [ - '5m', '15m', '1h', - ] - last_conf = ensure_base_tf_in_include_timeframes(updated_conf) - assert last_conf == updated_conf - - def test_get_required_data_timerange(mocker, freqai_conf): time_range = get_required_data_timerange(freqai_conf) assert (time_range.stopts - time_range.startts) == 177300 From 26a61afa15bce5d85256e8706534295f8cb033c3 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 5 Dec 2022 23:54:15 +0300 Subject: [PATCH 280/421] Move base tf logic to config validation --- freqtrade/configuration/config_validation.py | 7 +++++++ tests/test_configuration.py | 7 ++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/freqtrade/configuration/config_validation.py b/freqtrade/configuration/config_validation.py index bf0657994..606f081ef 100644 --- a/freqtrade/configuration/config_validation.py +++ b/freqtrade/configuration/config_validation.py @@ -355,6 +355,13 @@ def _validate_freqai_include_timeframes(conf: Dict[str, Any]) -> None: f"Main timeframe of {main_tf} must be smaller or equal to FreqAI " f"`include_timeframes`.Offending include-timeframes: {', '.join(offending_lines)}") + # Ensure that the base timeframe is included in the include_timeframes list + if main_tf not in freqai_include_timeframes: + feature_parameters = conf.get('freqai', {}).get('feature_parameters', {}) + include_timeframes = [main_tf] + freqai_include_timeframes + conf.get('freqai', {}).get('feature_parameters', {}) \ + .update({**feature_parameters, 'include_timeframes': include_timeframes}) + def _validate_freqai_backtest(conf: Dict[str, Any]) -> None: if conf.get('runmode', RunMode.OTHER) == RunMode.BACKTEST: diff --git a/tests/test_configuration.py b/tests/test_configuration.py index 1bcff20db..cdf9f2f2e 100644 --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -1046,8 +1046,13 @@ def test__validate_freqai_include_timeframes(default_conf, caplog) -> None: # Validation pass conf.update({'timeframe': '1m'}) validate_config_consistency(conf) - conf.update({'analyze_per_epoch': True}) + # Ensure base timeframe is in include_timeframes + conf['freqai']['feature_parameters']['include_timeframes'] = ["5m", "15m"] + validate_config_consistency(conf) + assert conf['freqai']['feature_parameters']['include_timeframes'] == ["1m", "5m", "15m"] + + conf.update({'analyze_per_epoch': True}) with pytest.raises(OperationalException, match=r"Using analyze-per-epoch .* not supported with a FreqAI strategy."): validate_config_consistency(conf) From 227cdb09386153fd7a871e3b72ff46cd2999962e Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 5 Dec 2022 23:58:04 +0300 Subject: [PATCH 281/421] Change dict update order --- freqtrade/configuration/config_validation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/configuration/config_validation.py b/freqtrade/configuration/config_validation.py index 606f081ef..7e291cb90 100644 --- a/freqtrade/configuration/config_validation.py +++ b/freqtrade/configuration/config_validation.py @@ -360,7 +360,7 @@ def _validate_freqai_include_timeframes(conf: Dict[str, Any]) -> None: feature_parameters = conf.get('freqai', {}).get('feature_parameters', {}) include_timeframes = [main_tf] + 
freqai_include_timeframes conf.get('freqai', {}).get('feature_parameters', {}) \ - .update({**feature_parameters, 'include_timeframes': include_timeframes}) + .update({'include_timeframes': include_timeframes, **feature_parameters}) def _validate_freqai_backtest(conf: Dict[str, Any]) -> None: From 58604c747e759161f25ad4c90571fbaf6a1c5233 Mon Sep 17 00:00:00 2001 From: initrv Date: Wed, 7 Dec 2022 14:37:55 +0300 Subject: [PATCH 282/421] cleanup tensorboard callback --- freqtrade/freqai/RL/BaseEnvironment.py | 10 ++----- freqtrade/freqai/RL/TensorboardCallback.py | 27 +++++++++---------- .../prediction_models/ReinforcementLearner.py | 14 +++++----- 3 files changed, 21 insertions(+), 30 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index a31ded0c6..71b423844 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -137,15 +137,9 @@ class BaseEnvironment(gym.Env): Reset is called at the beginning of every episode """ # custom_info is used for episodic reports and tensorboard logging - self.custom_info["Invalid"] = 0 - self.custom_info["Hold"] = 0 - self.custom_info["Unknown"] = 0 - self.custom_info["pnl_factor"] = 0 - self.custom_info["duration_factor"] = 0 - self.custom_info["reward_exit"] = 0 - self.custom_info["reward_hold"] = 0 + self.custom_info: dict = {} for action in self.actions: - self.custom_info[f"{action.name}"] = 0 + self.custom_info[action.name] = 0 self._done = False diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py index f590bdf84..d03c040d4 100644 --- a/freqtrade/freqai/RL/TensorboardCallback.py +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -42,19 +42,18 @@ class TensorboardCallback(BaseCallback): ) def _on_step(self) -> bool: + + local_info = self.locals["infos"][0] custom_info = self.training_env.get_attr("custom_info")[0] - self.logger.record("_state/position", self.locals["infos"][0]["position"]) - self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) - self.logger.record("_state/current_profit_pct", self.locals["infos"] - [0]["current_profit_pct"]) - self.logger.record("_reward/total_profit", self.locals["infos"][0]["total_profit"]) - self.logger.record("_reward/total_reward", self.locals["infos"][0]["total_reward"]) - self.logger.record_mean("_reward/mean_trade_duration", self.locals["infos"] - [0]["trade_duration"]) - self.logger.record("_actions/action", self.locals["infos"][0]["action"]) - self.logger.record("_actions/_Invalid", custom_info["Invalid"]) - self.logger.record("_actions/_Unknown", custom_info["Unknown"]) - self.logger.record("_actions/Hold", custom_info["Hold"]) - for action in self.actions: - self.logger.record(f"_actions/{action.name}", custom_info[action.name]) + + for info in local_info: + if info not in ["episode", "terminal_observation"]: + self.logger.record(f"_info/{info}", local_info[info]) + + for info in custom_info: + if info in [action.name for action in self.actions]: + self.logger.record(f"_actions/{info}", custom_info[info]) + else: + self.logger.record(f"_custom/{info}", custom_info[info]) + return True diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 47dbaf99e..1383ad15e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -100,7 +100,6 @@ class 
ReinforcementLearner(BaseReinforcementLearningModel): """ # first, penalize if the action is not valid if not self._is_valid(action): - self.custom_info["Invalid"] += 1 return -2 pnl = self.get_unrealized_profit() @@ -109,15 +108,15 @@ class ReinforcementLearner(BaseReinforcementLearningModel): # reward agent for entering trades if (action == Actions.Long_enter.value and self._position == Positions.Neutral): - self.custom_info[f"{Actions.Long_enter.name}"] += 1 + self.custom_info[Actions.Long_enter.name] += 1 return 25 if (action == Actions.Short_enter.value and self._position == Positions.Neutral): - self.custom_info[f"{Actions.Short_enter.name}"] += 1 + self.custom_info[Actions.Short_enter.name] += 1 return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: - self.custom_info[f"{Actions.Neutral.name}"] += 1 + self.custom_info[Actions.Neutral.name] += 1 return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) @@ -131,22 +130,21 @@ class ReinforcementLearner(BaseReinforcementLearningModel): # discourage sitting in position if (self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value): - self.custom_info["Hold"] += 1 + self.custom_info[Actions.Neutral.name] += 1 return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - self.custom_info[f"{Actions.Long_exit.name}"] += 1 + self.custom_info[Actions.Long_exit.name] += 1 return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - self.custom_info[f"{Actions.Short_exit.name}"] += 1 + self.custom_info[Actions.Short_exit.name] += 1 return float(pnl * factor) - self.custom_info["Unknown"] += 1 return 0. 
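The patch above wires the per-action counters kept in `BaseEnvironment.custom_info` into tensorboard: `TensorboardCallback._on_step()` now reads `custom_info` from the first vectorized environment via `get_attr("custom_info")[0]`, logs any key matching an action name under `_actions/`, and every other key under `_custom/` (step-level fields from `locals["infos"][0]` go under `_info/` in the same fashion). A minimal sketch of that dispatch, illustrative only, with invented keys and counter values and a `print` stand-in for stable-baselines3's `logger.record`, looks like this:

```python
# Illustrative sketch of the custom_info dispatch added to TensorboardCallback.
# The keys and counter values below are invented; in the bot they come from
# BaseEnvironment.custom_info, which reset() seeds with one zeroed entry per
# action name and which calculate_reward() increments.
action_names = {"Neutral", "Long_enter", "Long_exit", "Short_enter", "Short_exit"}

custom_info = {"Long_enter": 4, "Long_exit": 3, "Neutral": 120, "pnl_factor": 1.8}

def record(tag: str, value: float) -> None:
    # stand-in for self.logger.record(tag, value) inside the callback
    print(f"{tag}: {value}")

for key, value in custom_info.items():
    # action counters land under "_actions/", anything else under "_custom/"
    namespace = "_actions" if key in action_names else "_custom"
    record(f"{namespace}/{key}", value)
```

Because `calculate_reward()` increments `self.custom_info[Actions.<name>.name]` and `reset()` seeds one zeroed entry per action, the callback needs no hard-coded action list; membership in `self.actions` is enough to route a counter to the right namespace.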
From 199fd2d074a7d02c5f5ddad205ca59032591d519 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Wed, 7 Dec 2022 15:08:33 +0100 Subject: [PATCH 283/421] +Remote Pairlist --- freqtrade/constants.py | 2 +- freqtrade/plugins/pairlist/RemotePairlist.py | 152 +++++++++++++++++++ 2 files changed, 153 insertions(+), 1 deletion(-) create mode 100644 freqtrade/plugins/pairlist/RemotePairlist.py diff --git a/freqtrade/constants.py b/freqtrade/constants.py index d869b89f6..dba277916 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -31,7 +31,7 @@ HYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss', 'CalmarHyperOptLoss', 'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss', 'ProfitDrawDownHyperOptLoss'] -AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', +AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairlist', 'AgeFilter', 'OffsetFilter', 'PerformanceFilter', 'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter', 'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter'] diff --git a/freqtrade/plugins/pairlist/RemotePairlist.py b/freqtrade/plugins/pairlist/RemotePairlist.py new file mode 100644 index 000000000..3b1b56069 --- /dev/null +++ b/freqtrade/plugins/pairlist/RemotePairlist.py @@ -0,0 +1,152 @@ +""" +Remote PairList provider + +Provides dynamic pair list based on trade volumes +""" +import json +import logging +from typing import Any, Dict, List + +import requests +from cachetools import TTLCache + +from freqtrade.constants import Config +from freqtrade.exceptions import OperationalException +from freqtrade.exchange.types import Tickers +from freqtrade.plugins.pairlist.IPairList import IPairList + + +logger = logging.getLogger(__name__) + + +class RemotePairlist(IPairList): + + def __init__(self, exchange, pairlistmanager, + config: Config, pairlistconfig: Dict[str, Any], + pairlist_pos: int) -> None: + super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) + + if 'number_assets' not in self._pairlistconfig: + raise OperationalException( + '`number_assets` not specified. Please check your configuration ' + 'for "pairlist.config.number_assets"') + + self._number_pairs = self._pairlistconfig['number_assets'] + self._refresh_period = self._pairlistconfig.get('refresh_period', 1800) + self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True) + self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) + self._pairlist_url = self._pairlistconfig.get('pairlist_url', + 'http://pairlist.robot.co.network') + self._stake_currency = config['stake_currency'] + + if (self._refresh_period < 850): + raise OperationalException( + 'Please set a Refresh Period higher than 850 for the Remotepairlist.' + ) + + @property + def needstickers(self) -> bool: + """ + Boolean property defining if tickers are necessary. + If no Pairlist requires tickers, an empty Dict is passed + as tickers argument to filter_pairlist + """ + return False + + def short_desc(self) -> str: + """ + Short whitelist method description - used for startup-messages + """ + return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from Remote." + + def gen_pairlist(self, tickers: Tickers) -> List[str]: + """ + Generate the pairlist + :param tickers: Tickers (from exchange.get_tickers). May be cached. 
+ :return: List of pairs + """ + hick = "'" + double = '"' + # Generate dynamic whitelist + # Must always run if this pairlist is not the first in the list. + pairlist = self._pair_cache.get('pairlist') + + if pairlist: + # Item found - no refresh necessary + return pairlist.copy() + else: + + headers = { + 'User-Agent': 'Freqtrade Pairlist Fetcher', + } + + if "limit" not in self._pairlist_url: + url = self._pairlist_url + "&limit=" + str(self._number_pairs) + else: + url = self._pairlist_url + + if "stake" not in self._pairlist_url: + url = self._pairlist_url + "&stake=" + str(self._config['stake_currency']) + else: + url = self._pairlist_url + + if "exchange" not in self._pairlist_url: + url = self._pairlist_url + "&exchange=" + str(self._config['exchange']) + else: + url = self._pairlist_url + + try: + response = requests.get(url, headers=headers, timeout=60) + responser = response.text.replace(hick, double) + time_elapsed = response.elapsed.total_seconds() + rsplit = responser.split("#") + plist = rsplit[0].strip() + plist = plist.replace("
", "") + plist = json.loads(plist) + info = rsplit[1].strip() + + except Exception as e: + print(e) + self.log_once(f'Was not able to receive pairlist from' + f' {self._pairlist_url}', logger.info) + + if self._keep_pairlist_on_failure: + plist = pairlist + else: + plist = "" + + + pairlist = [] + + for i in plist: + if i not in pairlist: + if "/" in i: + if self._stake_currency in i: + pairlist.append(i) + else: + continue + else: + pairlist.append(i + "/" + self._config['stake_currency']) + + pairlist = self.filter_pairlist(pairlist, tickers) + self._pair_cache['pairlist'] = pairlist.copy() + self.log_once(info + " | " + "Fetched in " + str(time_elapsed) + " seconds.", logger.info) + return pairlist + + def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]: + """ + Filters and sorts pairlist and returns the whitelist again. + Called on each bot iteration - please use internal caching if necessary + :param pairlist: pairlist to filter or sort + :param tickers: Tickers (from exchange.get_tickers). May be cached. + :return: new whitelist + """ + + # Validate whitelist to only have active market pairs + pairlist = self._whitelist_for_active_markets(pairlist) + pairlist = self.verify_blacklist(pairlist, logger.info) + # Limit pairlist to the requested number of pairs + pairlist = pairlist[:self._number_pairs] + self.log_once(f"Searching {self._number_pairs} pairs: {pairlist}", logger.info) + + return pairlist From 48160f3fe9b099aa0c286fc78efcc5971186a323 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Wed, 7 Dec 2022 17:01:45 +0100 Subject: [PATCH 284/421] Flake 8 fix, Json Fetching --- freqtrade/constants.py | 2 +- freqtrade/plugins/pairlist/RemotePairList.py | 146 +++++++++++++++++++ 2 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 freqtrade/plugins/pairlist/RemotePairList.py diff --git a/freqtrade/constants.py b/freqtrade/constants.py index dba277916..e2eccfed3 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -31,7 +31,7 @@ HYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss', 'CalmarHyperOptLoss', 'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss', 'ProfitDrawDownHyperOptLoss'] -AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairlist', +AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairList', 'AgeFilter', 'OffsetFilter', 'PerformanceFilter', 'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter', 'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter'] diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py new file mode 100644 index 000000000..684e68a1b --- /dev/null +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -0,0 +1,146 @@ +""" +Remote PairList provider + +Provides pair list fetched from a remote source +""" +import json +import logging +from typing import Any, Dict, List + +import requests +from cachetools import TTLCache + +from freqtrade.constants import Config +from freqtrade.exceptions import OperationalException +from freqtrade.exchange.types import Tickers +from freqtrade.plugins.pairlist.IPairList import IPairList + + +logger = logging.getLogger(__name__) + + +class RemotePairList(IPairList): + + def __init__(self, exchange, pairlistmanager, + config: Config, pairlistconfig: Dict[str, Any], + pairlist_pos: int) -> None: + super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) + + if 'number_assets' not in 
self._pairlistconfig: + raise OperationalException( + '`number_assets` not specified. Please check your configuration ' + 'for "pairlist.config.number_assets"') + + if 'pairlist_url' not in self._pairlistconfig: + raise OperationalException( + '`pairlist_url` not specified. Please check your configuration ' + 'for "pairlist.config.pairlist_url"') + + self._number_pairs = self._pairlistconfig['number_assets'] + self._refresh_period = self._pairlistconfig.get('refresh_period', 1800) + self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True) + self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) + self._pairlist_url = self._pairlistconfig.get('pairlist_url', '') + self._read_timeout = self._pairlistconfig.get('read_timeout', 60) + self._last_pairlist: List[Any] = list() + + @property + def needstickers(self) -> bool: + """ + Boolean property defining if tickers are necessary. + If no Pairlist requires tickers, an empty Dict is passed + as tickers argument to filter_pairlist + """ + return False + + def short_desc(self) -> str: + """ + Short whitelist method description - used for startup-messages + """ + return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." + + def gen_pairlist(self, tickers: Tickers) -> List[str]: + """ + Generate the pairlist + :param tickers: Tickers (from exchange.get_tickers). May be cached. + :return: List of pairs + """ + pairlist = self._pair_cache.get('pairlist') + info = "" + + if pairlist: + # Item found - no refresh necessary + return pairlist.copy() + else: + # Fetch Pairlist from Remote + headers = { + 'User-Agent': 'Freqtrade - Remotepairlist', + } + + try: + response = requests.get(self._pairlist_url, headers=headers, + timeout=self._read_timeout) + content_type = response.headers.get('content-type') + time_elapsed = response.elapsed.total_seconds() + + rsplit = response.text.split("#") + + if "text/html" in str(content_type): + if len(rsplit) > 1: + plist = rsplit[0].strip() + plist = json.loads(plist) + info = rsplit[1].strip() + else: + plist = json.loads(rsplit[0]) + elif "application/json" in str(content_type): + jsonp = json.loads(' '.join(rsplit)) + plist = jsonp['pairs'] + info = jsonp['info'] + + except requests.exceptions.RequestException: + self.log_once(f'Was not able to fetch pairlist from:' + f' {self._pairlist_url}', logger.info) + + if self._keep_pairlist_on_failure: + plist = str(self._last_pairlist) + self.log_once('Keeping last fetched pairlist', logger.info) + else: + plist = "" + + time_elapsed = 0 + + pairlist = [] + + for i in plist: + if i not in pairlist: + pairlist.append(i) + else: + continue + + pairlist = self.filter_pairlist(pairlist, tickers) + self._pair_cache['pairlist'] = pairlist.copy() + + if(time_elapsed): + self.log_once(info + " | " + " Fetched in " + str(time_elapsed) + + " seconds.", logger.info) + + self._last_pairlist = list(pairlist) + return pairlist + + def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]: + """ + Filters and sorts pairlist and returns the whitelist again. + Called on each bot iteration - please use internal caching if necessary + :param pairlist: pairlist to filter or sort + :param tickers: Tickers (from exchange.get_tickers). May be cached. 
+ :return: new whitelist + """ + + # Validate whitelist to only have active market pairs + pairlist = self._whitelist_for_active_markets(pairlist) + pairlist = self.verify_blacklist(pairlist, logger.info) + # Limit pairlist to the requested number of pairs + pairlist = pairlist[:self._number_pairs] + self.log_once(f"Searching {self._number_pairs} pairs: {pairlist}", logger.info) + + return pairlist From 607d5b2f8f0e870c34fe3bdee2c8fe6cff4af37c Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Wed, 7 Dec 2022 17:47:38 +0100 Subject: [PATCH 285/421] Split to fetch_pairlist function, Info Message --- freqtrade/plugins/pairlist/RemotePairList.py | 87 +++++++++++--------- 1 file changed, 47 insertions(+), 40 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 684e68a1b..b6d0abe35 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -59,6 +59,49 @@ class RemotePairList(IPairList): """ return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." + def fetch_pairlist(self): + headers = { + 'User-Agent': 'Freqtrade - Remotepairlist', + } + + try: + response = requests.get(self._pairlist_url, headers=headers, + timeout=self._read_timeout) + content_type = response.headers.get('content-type') + time_elapsed = response.elapsed.total_seconds() + + rsplit = response.text.split("#") + + if "text/html" in str(content_type): + if len(rsplit) > 1: + plist = rsplit[0].strip() + plist = json.loads(plist) + info = rsplit[1].strip() + else: + plist = json.loads(rsplit[0]) + elif "application/json" in str(content_type): + jsonr = response.json() + plist = jsonr['pairs'] + + if 'info' in jsonr: + info = jsonr['info'] + if 'refresh_period' in jsonr: + self._refresh_period = jsonr['refresh_period'] + + except requests.exceptions.RequestException: + self.log_once(f'Was not able to fetch pairlist from:' + f' {self._pairlist_url}', logger.info) + + if self._keep_pairlist_on_failure: + plist = str(self._last_pairlist) + self.log_once('Keeping last fetched pairlist', logger.info) + else: + plist = "" + + time_elapsed = 0 + + return plist, time_elapsed, info + def gen_pairlist(self, tickers: Tickers) -> List[str]: """ Generate the pairlist @@ -66,49 +109,14 @@ class RemotePairList(IPairList): :return: List of pairs """ pairlist = self._pair_cache.get('pairlist') - info = "" + info = "Pairlist" if pairlist: # Item found - no refresh necessary return pairlist.copy() else: - # Fetch Pairlist from Remote - headers = { - 'User-Agent': 'Freqtrade - Remotepairlist', - } - - try: - response = requests.get(self._pairlist_url, headers=headers, - timeout=self._read_timeout) - content_type = response.headers.get('content-type') - time_elapsed = response.elapsed.total_seconds() - - rsplit = response.text.split("#") - - if "text/html" in str(content_type): - if len(rsplit) > 1: - plist = rsplit[0].strip() - plist = json.loads(plist) - info = rsplit[1].strip() - else: - plist = json.loads(rsplit[0]) - elif "application/json" in str(content_type): - jsonp = json.loads(' '.join(rsplit)) - plist = jsonp['pairs'] - info = jsonp['info'] - - except requests.exceptions.RequestException: - self.log_once(f'Was not able to fetch pairlist from:' - f' {self._pairlist_url}', logger.info) - - if self._keep_pairlist_on_failure: - plist = str(self._last_pairlist) - self.log_once('Keeping last fetched pairlist', logger.info) - else: - plist = "" - - time_elapsed = 0 - + # Fetch Pairlist from 
Remote URL + plist, time_elapsed, info = self.fetch_pairlist() pairlist = [] for i in plist: @@ -121,8 +129,7 @@ class RemotePairList(IPairList): self._pair_cache['pairlist'] = pairlist.copy() if(time_elapsed): - self.log_once(info + " | " + " Fetched in " + str(time_elapsed) + - " seconds.", logger.info) + self.log_once(f'{info} Fetched in {time_elapsed} seconds.', logger.info) self._last_pairlist = list(pairlist) return pairlist From 547a75d9c1abc42db10b811c152147a66d48a6af Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Wed, 7 Dec 2022 17:49:21 +0100 Subject: [PATCH 286/421] Fix Info --- freqtrade/plugins/pairlist/RemotePairList.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index b6d0abe35..07829d246 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -64,6 +64,8 @@ class RemotePairList(IPairList): 'User-Agent': 'Freqtrade - Remotepairlist', } + info = "Pairlist" + try: response = requests.get(self._pairlist_url, headers=headers, timeout=self._read_timeout) @@ -109,7 +111,6 @@ class RemotePairList(IPairList): :return: List of pairs """ pairlist = self._pair_cache.get('pairlist') - info = "Pairlist" if pairlist: # Item found - no refresh necessary From b144a6357d7cbafa1ab7ded091f6e5ad79a78027 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Wed, 7 Dec 2022 18:24:55 +0100 Subject: [PATCH 287/421] Remove Duplicate --- freqtrade/plugins/pairlist/RemotePairlist.py | 152 ------------------- 1 file changed, 152 deletions(-) delete mode 100644 freqtrade/plugins/pairlist/RemotePairlist.py diff --git a/freqtrade/plugins/pairlist/RemotePairlist.py b/freqtrade/plugins/pairlist/RemotePairlist.py deleted file mode 100644 index 3b1b56069..000000000 --- a/freqtrade/plugins/pairlist/RemotePairlist.py +++ /dev/null @@ -1,152 +0,0 @@ -""" -Remote PairList provider - -Provides dynamic pair list based on trade volumes -""" -import json -import logging -from typing import Any, Dict, List - -import requests -from cachetools import TTLCache - -from freqtrade.constants import Config -from freqtrade.exceptions import OperationalException -from freqtrade.exchange.types import Tickers -from freqtrade.plugins.pairlist.IPairList import IPairList - - -logger = logging.getLogger(__name__) - - -class RemotePairlist(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: - super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - - if 'number_assets' not in self._pairlistconfig: - raise OperationalException( - '`number_assets` not specified. Please check your configuration ' - 'for "pairlist.config.number_assets"') - - self._number_pairs = self._pairlistconfig['number_assets'] - self._refresh_period = self._pairlistconfig.get('refresh_period', 1800) - self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True) - self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) - self._pairlist_url = self._pairlistconfig.get('pairlist_url', - 'http://pairlist.robot.co.network') - self._stake_currency = config['stake_currency'] - - if (self._refresh_period < 850): - raise OperationalException( - 'Please set a Refresh Period higher than 850 for the Remotepairlist.' - ) - - @property - def needstickers(self) -> bool: - """ - Boolean property defining if tickers are necessary. 
- If no Pairlist requires tickers, an empty Dict is passed - as tickers argument to filter_pairlist - """ - return False - - def short_desc(self) -> str: - """ - Short whitelist method description - used for startup-messages - """ - return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from Remote." - - def gen_pairlist(self, tickers: Tickers) -> List[str]: - """ - Generate the pairlist - :param tickers: Tickers (from exchange.get_tickers). May be cached. - :return: List of pairs - """ - hick = "'" - double = '"' - # Generate dynamic whitelist - # Must always run if this pairlist is not the first in the list. - pairlist = self._pair_cache.get('pairlist') - - if pairlist: - # Item found - no refresh necessary - return pairlist.copy() - else: - - headers = { - 'User-Agent': 'Freqtrade Pairlist Fetcher', - } - - if "limit" not in self._pairlist_url: - url = self._pairlist_url + "&limit=" + str(self._number_pairs) - else: - url = self._pairlist_url - - if "stake" not in self._pairlist_url: - url = self._pairlist_url + "&stake=" + str(self._config['stake_currency']) - else: - url = self._pairlist_url - - if "exchange" not in self._pairlist_url: - url = self._pairlist_url + "&exchange=" + str(self._config['exchange']) - else: - url = self._pairlist_url - - try: - response = requests.get(url, headers=headers, timeout=60) - responser = response.text.replace(hick, double) - time_elapsed = response.elapsed.total_seconds() - rsplit = responser.split("#") - plist = rsplit[0].strip() - plist = plist.replace("
", "") - plist = json.loads(plist) - info = rsplit[1].strip() - - except Exception as e: - print(e) - self.log_once(f'Was not able to receive pairlist from' - f' {self._pairlist_url}', logger.info) - - if self._keep_pairlist_on_failure: - plist = pairlist - else: - plist = "" - - - pairlist = [] - - for i in plist: - if i not in pairlist: - if "/" in i: - if self._stake_currency in i: - pairlist.append(i) - else: - continue - else: - pairlist.append(i + "/" + self._config['stake_currency']) - - pairlist = self.filter_pairlist(pairlist, tickers) - self._pair_cache['pairlist'] = pairlist.copy() - self.log_once(info + " | " + "Fetched in " + str(time_elapsed) + " seconds.", logger.info) - return pairlist - - def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]: - """ - Filters and sorts pairlist and returns the whitelist again. - Called on each bot iteration - please use internal caching if necessary - :param pairlist: pairlist to filter or sort - :param tickers: Tickers (from exchange.get_tickers). May be cached. - :return: new whitelist - """ - - # Validate whitelist to only have active market pairs - pairlist = self._whitelist_for_active_markets(pairlist) - pairlist = self.verify_blacklist(pairlist, logger.info) - # Limit pairlist to the requested number of pairs - pairlist = pairlist[:self._number_pairs] - self.log_once(f"Searching {self._number_pairs} pairs: {pairlist}", logger.info) - - return pairlist From 9b4364ddc3e410ca445cf08d73c606aed4323e6d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 7 Dec 2022 19:49:14 +0100 Subject: [PATCH 288/421] ensure that add_state_info is deactivated during backtesting --- freqtrade/freqai/RL/BaseEnvironment.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index a31ded0c6..c217b72dd 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -12,6 +12,7 @@ from gym.utils import seeding from pandas import DataFrame from freqtrade.data.dataprovider import DataProvider +from freqtrade.enums import RunMode logger = logging.getLogger(__name__) @@ -78,6 +79,11 @@ class BaseEnvironment(gym.Env): # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions self.custom_info: dict = {} + self.live: bool = False + if dp: + self.live = dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) + if not self.live and self.add_state_info: + logger.warning("add_state_info is not available in backtesting. 
Deactivating.") def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): @@ -188,7 +194,7 @@ class BaseEnvironment(gym.Env): """ features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] - if self.add_state_info: + if self.add_state_info and self.live: features_and_state = DataFrame(np.zeros((len(features_window), 3)), columns=['current_profit_pct', 'position', From 7b3406914c2a219b877867e08f93c26ab64d9e41 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 7 Dec 2022 19:49:39 +0100 Subject: [PATCH 289/421] flip add_state_info --- freqtrade/freqai/RL/BaseEnvironment.py | 1 + 1 file changed, 1 insertion(+) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index c217b72dd..86c63c382 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -83,6 +83,7 @@ class BaseEnvironment(gym.Env): if dp: self.live = dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) if not self.live and self.add_state_info: + self.add_state_info = False logger.warning("add_state_info is not available in backtesting. Deactivating.") def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, From da2747d487ced9129a3b3ae8336e6d7533da5132 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Thu, 8 Dec 2022 00:52:54 +0100 Subject: [PATCH 290/421] Add Local .json file Loading --- freqtrade/plugins/pairlist/RemotePairList.py | 30 +++++++++++++++----- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 07829d246..c3b612067 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -5,6 +5,7 @@ Provides pair list fetched from a remote source """ import json import logging +from pathlib import Path from typing import Any, Dict, List import requests @@ -110,21 +111,36 @@ class RemotePairList(IPairList): :param tickers: Tickers (from exchange.get_tickers). May be cached. 
:return: List of pairs """ + + time_elapsed = 0 pairlist = self._pair_cache.get('pairlist') if pairlist: # Item found - no refresh necessary return pairlist.copy() else: - # Fetch Pairlist from Remote URL - plist, time_elapsed, info = self.fetch_pairlist() - pairlist = [] + if self._pairlist_url.startswith("file:///"): + filename = self._pairlist_url.split("file:///", 1)[1] + file_path = Path(filename) - for i in plist: - if i not in pairlist: - pairlist.append(i) + if file_path.exists(): + with open(filename) as json_file: + # Load the JSON data into a dictionary + jsonp = json.load(json_file) + plist = jsonp['pairs'] else: - continue + raise ValueError(f"{self._pairlist_url} does not exist.") + else: + # Fetch Pairlist from Remote URL + plist, time_elapsed, info = self.fetch_pairlist() + + pairlist = [] + + for i in plist: + if i not in pairlist: + pairlist.append(i) + else: + continue pairlist = self.filter_pairlist(pairlist, tickers) self._pair_cache['pairlist'] = pairlist.copy() From 7efcbbb4573c3a5ff75cca0fc892cdc6a743e779 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Thu, 8 Dec 2022 01:09:17 +0100 Subject: [PATCH 291/421] Local File Loading --- freqtrade/plugins/pairlist/RemotePairList.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index c3b612067..af8b67577 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -83,13 +83,11 @@ class RemotePairList(IPairList): else: plist = json.loads(rsplit[0]) elif "application/json" in str(content_type): - jsonr = response.json() - plist = jsonr['pairs'] + jsonp = response.json() + plist = jsonp['pairs'] - if 'info' in jsonr: - info = jsonr['info'] - if 'refresh_period' in jsonr: - self._refresh_period = jsonr['refresh_period'] + info = jsonp.get('info', '') + self._refresh_period = jsonp.get('refresh_period', self._refresh_period) except requests.exceptions.RequestException: self.log_once(f'Was not able to fetch pairlist from:' @@ -128,6 +126,10 @@ class RemotePairList(IPairList): # Load the JSON data into a dictionary jsonp = json.load(json_file) plist = jsonp['pairs'] + + info = jsonp.get('info', '') + self._refresh_period = jsonp.get('refresh_period', self._refresh_period) + else: raise ValueError(f"{self._pairlist_url} does not exist.") else: @@ -145,8 +147,10 @@ class RemotePairList(IPairList): pairlist = self.filter_pairlist(pairlist, tickers) self._pair_cache['pairlist'] = pairlist.copy() - if(time_elapsed): + if (time_elapsed) in locals(): self.log_once(f'{info} Fetched in {time_elapsed} seconds.', logger.info) + else: + self.log_once(f'{info} Fetched Pairlist.', logger.info) self._last_pairlist = list(pairlist) return pairlist From 66412bfa58645177ebcef18c4c8ecf4a875527c2 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Thu, 8 Dec 2022 01:51:12 +0100 Subject: [PATCH 292/421] Remove unnecessary loop --- freqtrade/plugins/pairlist/RemotePairList.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index af8b67577..7367f713c 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -110,8 +110,8 @@ class RemotePairList(IPairList): :return: List of pairs """ - time_elapsed = 0 pairlist = self._pair_cache.get('pairlist') + time_elapsed = 0 if pairlist: # Item found - 
no refresh necessary @@ -125,7 +125,7 @@ class RemotePairList(IPairList): with open(filename) as json_file: # Load the JSON data into a dictionary jsonp = json.load(json_file) - plist = jsonp['pairs'] + pairlist = jsonp['pairs'] info = jsonp.get('info', '') self._refresh_period = jsonp.get('refresh_period', self._refresh_period) @@ -134,20 +134,12 @@ class RemotePairList(IPairList): raise ValueError(f"{self._pairlist_url} does not exist.") else: # Fetch Pairlist from Remote URL - plist, time_elapsed, info = self.fetch_pairlist() - - pairlist = [] - - for i in plist: - if i not in pairlist: - pairlist.append(i) - else: - continue + pairlist, time_elapsed, info = self.fetch_pairlist() pairlist = self.filter_pairlist(pairlist, tickers) self._pair_cache['pairlist'] = pairlist.copy() - if (time_elapsed) in locals(): + if time_elapsed: self.log_once(f'{info} Fetched in {time_elapsed} seconds.', logger.info) else: self.log_once(f'{info} Fetched Pairlist.', logger.info) From 74e623fe5b4c5931362f149ce88d52ed3cb12cdc Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Dec 2022 08:33:07 +0100 Subject: [PATCH 293/421] Improve kraken test resiliance --- tests/exchange/test_ccxt_compat.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/exchange/test_ccxt_compat.py b/tests/exchange/test_ccxt_compat.py index 280876ae8..7f23c2031 100644 --- a/tests/exchange/test_ccxt_compat.py +++ b/tests/exchange/test_ccxt_compat.py @@ -224,8 +224,13 @@ class TestCCXTExchange(): for val in [1, 2, 5, 25, 100]: l2 = exchange.fetch_l2_order_book(pair, val) if not l2_limit_range or val in l2_limit_range: - assert len(l2['asks']) == val - assert len(l2['bids']) == val + if val > 50: + # Orderbooks are not always this deep. + assert val - 5 < len(l2['asks']) <= val + assert val - 5 < len(l2['bids']) <= val + else: + assert len(l2['asks']) == val + assert len(l2['bids']) == val else: next_limit = exchange.get_next_limit_in_list( val, l2_limit_range, l2_limit_range_required) From 3d3a7033ed34f8c9bed86c729198e8a4b5e0414f Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Dec 2022 08:46:16 +0100 Subject: [PATCH 294/421] Improve Docker documentation wording --- docs/docker_quickstart.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/docker_quickstart.md b/docs/docker_quickstart.md index 6b48a7877..89f737d71 100644 --- a/docs/docker_quickstart.md +++ b/docs/docker_quickstart.md @@ -4,13 +4,15 @@ This page explains how to run the bot with Docker. It is not meant to work out o ## Install Docker -Start by downloading and installing Docker CE for your platform: +Start by downloading and installing Docker / Docker Desktop for your platform: * [Mac](https://docs.docker.com/docker-for-mac/install/) * [Windows](https://docs.docker.com/docker-for-windows/install/) * [Linux](https://docs.docker.com/install/) -To simplify running freqtrade, [`docker compose`](https://docs.docker.com/compose/install/) should be installed and available to follow the below [docker quick start guide](#docker-quick-start). +!!! Info "Docker compose install" + Freqtrade documentation assumes the use of Docker desktop (or the docker compose plugin). + While the docker-compose standalone installation still works, it will require changing all `docker compose` commands from `docker compose` to `docker-compose` to work (e.g. `docker compose up -d` will become `docker-compose up -d`). 
## Freqtrade with docker From bbedc4b63efd08a4e4e3b2371a8463e6f6e445b3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Dec 2022 14:15:29 +0100 Subject: [PATCH 295/421] Stop clock to avoid random failures on slow CI runs --- tests/rpc/test_rpc_telegram.py | 70 +++++++++++++++++----------------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/tests/rpc/test_rpc_telegram.py b/tests/rpc/test_rpc_telegram.py index 3552d5fe7..1f4665867 100644 --- a/tests/rpc/test_rpc_telegram.py +++ b/tests/rpc/test_rpc_telegram.py @@ -12,6 +12,7 @@ from unittest.mock import ANY, MagicMock import arrow import pytest +import time_machine from pandas import DataFrame from telegram import Chat, Message, ReplyKeyboardMarkup, Update from telegram.error import BadRequest, NetworkError, TelegramError @@ -2065,41 +2066,42 @@ def test_send_msg_sell_fill_notification(default_conf, mocker, direction, default_conf['telegram']['notification_settings']['exit_fill'] = 'on' telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) - telegram.send_msg({ - 'type': RPCMessageType.EXIT_FILL, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'KEY/ETH', - 'leverage': leverage, - 'direction': direction, - 'gain': 'loss', - 'limit': 3.201e-05, - 'amount': 1333.3333333333335, - 'order_type': 'market', - 'open_rate': 7.5e-05, - 'close_rate': 3.201e-05, - 'profit_amount': -0.05746268, - 'profit_ratio': -0.57405275, - 'stake_currency': 'ETH', - 'enter_tag': enter_signal, - 'exit_reason': ExitType.STOP_LOSS.value, - 'open_date': arrow.utcnow().shift(days=-1, hours=-2, minutes=-30), - 'close_date': arrow.utcnow(), - }) + with time_machine.travel("2022-09-01 05:00:00 +00:00", tick=False) as t: + telegram.send_msg({ + 'type': RPCMessageType.EXIT_FILL, + 'trade_id': 1, + 'exchange': 'Binance', + 'pair': 'KEY/ETH', + 'leverage': leverage, + 'direction': direction, + 'gain': 'loss', + 'limit': 3.201e-05, + 'amount': 1333.3333333333335, + 'order_type': 'market', + 'open_rate': 7.5e-05, + 'close_rate': 3.201e-05, + 'profit_amount': -0.05746268, + 'profit_ratio': -0.57405275, + 'stake_currency': 'ETH', + 'enter_tag': enter_signal, + 'exit_reason': ExitType.STOP_LOSS.value, + 'open_date': arrow.utcnow().shift(days=-1, hours=-2, minutes=-30), + 'close_date': arrow.utcnow(), + }) - leverage_text = f'*Leverage:* `{leverage}`\n' if leverage and leverage != 1.0 else '' - assert msg_mock.call_args[0][0] == ( - '\N{WARNING SIGN} *Binance (dry):* Exited KEY/ETH (#1)\n' - '*Profit:* `-57.41% (loss: -0.05746268 ETH)`\n' - f'*Enter Tag:* `{enter_signal}`\n' - '*Exit Reason:* `stop_loss`\n' - f"*Direction:* `{direction}`\n" - f"{leverage_text}" - '*Amount:* `1333.33333333`\n' - '*Open Rate:* `0.00007500`\n' - '*Exit Rate:* `0.00003201`\n' - '*Duration:* `1 day, 2:30:00 (1590.0 min)`' - ) + leverage_text = f'*Leverage:* `{leverage}`\n' if leverage and leverage != 1.0 else '' + assert msg_mock.call_args[0][0] == ( + '\N{WARNING SIGN} *Binance (dry):* Exited KEY/ETH (#1)\n' + '*Profit:* `-57.41% (loss: -0.05746268 ETH)`\n' + f'*Enter Tag:* `{enter_signal}`\n' + '*Exit Reason:* `stop_loss`\n' + f"*Direction:* `{direction}`\n" + f"{leverage_text}" + '*Amount:* `1333.33333333`\n' + '*Open Rate:* `0.00007500`\n' + '*Exit Rate:* `0.00003201`\n' + '*Duration:* `1 day, 2:30:00 (1590.0 min)`' + ) def test_send_msg_status_notification(default_conf, mocker) -> None: From 1da8ad69d9501838fee5792b39563d9925ed7ad5 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Dec 2022 14:33:07 +0100 Subject: [PATCH 296/421] improve more tests by freezing 
time --- tests/rpc/test_rpc_telegram.py | 223 +++++++++++++++++---------------- 1 file changed, 112 insertions(+), 111 deletions(-) diff --git a/tests/rpc/test_rpc_telegram.py b/tests/rpc/test_rpc_telegram.py index 1f4665867..58977a94a 100644 --- a/tests/rpc/test_rpc_telegram.py +++ b/tests/rpc/test_rpc_telegram.py @@ -1907,119 +1907,120 @@ def test_send_msg_entry_fill_notification(default_conf, mocker, message_type, en def test_send_msg_sell_notification(default_conf, mocker) -> None: - telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) + with time_machine.travel("2022-09-01 05:00:00 +00:00", tick=False): + telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) - old_convamount = telegram._rpc._fiat_converter.convert_amount - telegram._rpc._fiat_converter.convert_amount = lambda a, b, c: -24.812 - telegram.send_msg({ - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'KEY/ETH', - 'leverage': 1.0, - 'direction': 'Long', - 'gain': 'loss', - 'order_rate': 3.201e-05, - 'amount': 1333.3333333333335, - 'order_type': 'market', - 'open_rate': 7.5e-05, - 'current_rate': 3.201e-05, - 'profit_amount': -0.05746268, - 'profit_ratio': -0.57405275, - 'stake_currency': 'ETH', - 'fiat_currency': 'USD', - 'enter_tag': 'buy_signal1', - 'exit_reason': ExitType.STOP_LOSS.value, - 'open_date': arrow.utcnow().shift(hours=-1), - 'close_date': arrow.utcnow(), - }) - assert msg_mock.call_args[0][0] == ( - '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n' - '*Unrealized Profit:* `-57.41% (loss: -0.05746268 ETH / -24.812 USD)`\n' - '*Enter Tag:* `buy_signal1`\n' - '*Exit Reason:* `stop_loss`\n' - '*Direction:* `Long`\n' - '*Amount:* `1333.33333333`\n' - '*Open Rate:* `0.00007500`\n' - '*Current Rate:* `0.00003201`\n' - '*Exit Rate:* `0.00003201`\n' - '*Duration:* `1:00:00 (60.0 min)`' - ) - - msg_mock.reset_mock() - telegram.send_msg({ - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'KEY/ETH', - 'direction': 'Long', - 'gain': 'loss', - 'order_rate': 3.201e-05, - 'amount': 1333.3333333333335, - 'order_type': 'market', - 'open_rate': 7.5e-05, - 'current_rate': 3.201e-05, - 'cumulative_profit': -0.15746268, - 'profit_amount': -0.05746268, - 'profit_ratio': -0.57405275, - 'stake_currency': 'ETH', - 'fiat_currency': 'USD', - 'enter_tag': 'buy_signal1', - 'exit_reason': ExitType.STOP_LOSS.value, - 'open_date': arrow.utcnow().shift(days=-1, hours=-2, minutes=-30), - 'close_date': arrow.utcnow(), - 'stake_amount': 0.01, - 'sub_trade': True, - }) - assert msg_mock.call_args[0][0] == ( - '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n' - '*Unrealized Sub Profit:* `-57.41% (loss: -0.05746268 ETH / -24.812 USD)`\n' - '*Cumulative Profit:* (`-0.15746268 ETH / -24.812 USD`)\n' - '*Enter Tag:* `buy_signal1`\n' - '*Exit Reason:* `stop_loss`\n' - '*Direction:* `Long`\n' - '*Amount:* `1333.33333333`\n' - '*Open Rate:* `0.00007500`\n' - '*Current Rate:* `0.00003201`\n' - '*Exit Rate:* `0.00003201`\n' - '*Remaining:* `(0.01 ETH, -24.812 USD)`' + old_convamount = telegram._rpc._fiat_converter.convert_amount + telegram._rpc._fiat_converter.convert_amount = lambda a, b, c: -24.812 + telegram.send_msg({ + 'type': RPCMessageType.EXIT, + 'trade_id': 1, + 'exchange': 'Binance', + 'pair': 'KEY/ETH', + 'leverage': 1.0, + 'direction': 'Long', + 'gain': 'loss', + 'order_rate': 3.201e-05, + 'amount': 1333.3333333333335, + 'order_type': 'market', + 'open_rate': 7.5e-05, + 'current_rate': 3.201e-05, + 'profit_amount': -0.05746268, 
+ 'profit_ratio': -0.57405275, + 'stake_currency': 'ETH', + 'fiat_currency': 'USD', + 'enter_tag': 'buy_signal1', + 'exit_reason': ExitType.STOP_LOSS.value, + 'open_date': arrow.utcnow().shift(hours=-1), + 'close_date': arrow.utcnow(), + }) + assert msg_mock.call_args[0][0] == ( + '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n' + '*Unrealized Profit:* `-57.41% (loss: -0.05746268 ETH / -24.812 USD)`\n' + '*Enter Tag:* `buy_signal1`\n' + '*Exit Reason:* `stop_loss`\n' + '*Direction:* `Long`\n' + '*Amount:* `1333.33333333`\n' + '*Open Rate:* `0.00007500`\n' + '*Current Rate:* `0.00003201`\n' + '*Exit Rate:* `0.00003201`\n' + '*Duration:* `1:00:00 (60.0 min)`' ) - msg_mock.reset_mock() - telegram.send_msg({ - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'KEY/ETH', - 'direction': 'Long', - 'gain': 'loss', - 'order_rate': 3.201e-05, - 'amount': 1333.3333333333335, - 'order_type': 'market', - 'open_rate': 7.5e-05, - 'current_rate': 3.201e-05, - 'profit_amount': -0.05746268, - 'profit_ratio': -0.57405275, - 'stake_currency': 'ETH', - 'enter_tag': 'buy_signal1', - 'exit_reason': ExitType.STOP_LOSS.value, - 'open_date': arrow.utcnow().shift(days=-1, hours=-2, minutes=-30), - 'close_date': arrow.utcnow(), - }) - assert msg_mock.call_args[0][0] == ( - '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n' - '*Unrealized Profit:* `-57.41% (loss: -0.05746268 ETH)`\n' - '*Enter Tag:* `buy_signal1`\n' - '*Exit Reason:* `stop_loss`\n' - '*Direction:* `Long`\n' - '*Amount:* `1333.33333333`\n' - '*Open Rate:* `0.00007500`\n' - '*Current Rate:* `0.00003201`\n' - '*Exit Rate:* `0.00003201`\n' - '*Duration:* `1 day, 2:30:00 (1590.0 min)`' - ) - # Reset singleton function to avoid random breaks - telegram._rpc._fiat_converter.convert_amount = old_convamount + msg_mock.reset_mock() + telegram.send_msg({ + 'type': RPCMessageType.EXIT, + 'trade_id': 1, + 'exchange': 'Binance', + 'pair': 'KEY/ETH', + 'direction': 'Long', + 'gain': 'loss', + 'order_rate': 3.201e-05, + 'amount': 1333.3333333333335, + 'order_type': 'market', + 'open_rate': 7.5e-05, + 'current_rate': 3.201e-05, + 'cumulative_profit': -0.15746268, + 'profit_amount': -0.05746268, + 'profit_ratio': -0.57405275, + 'stake_currency': 'ETH', + 'fiat_currency': 'USD', + 'enter_tag': 'buy_signal1', + 'exit_reason': ExitType.STOP_LOSS.value, + 'open_date': arrow.utcnow().shift(days=-1, hours=-2, minutes=-30), + 'close_date': arrow.utcnow(), + 'stake_amount': 0.01, + 'sub_trade': True, + }) + assert msg_mock.call_args[0][0] == ( + '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n' + '*Unrealized Sub Profit:* `-57.41% (loss: -0.05746268 ETH / -24.812 USD)`\n' + '*Cumulative Profit:* (`-0.15746268 ETH / -24.812 USD`)\n' + '*Enter Tag:* `buy_signal1`\n' + '*Exit Reason:* `stop_loss`\n' + '*Direction:* `Long`\n' + '*Amount:* `1333.33333333`\n' + '*Open Rate:* `0.00007500`\n' + '*Current Rate:* `0.00003201`\n' + '*Exit Rate:* `0.00003201`\n' + '*Remaining:* `(0.01 ETH, -24.812 USD)`' + ) + + msg_mock.reset_mock() + telegram.send_msg({ + 'type': RPCMessageType.EXIT, + 'trade_id': 1, + 'exchange': 'Binance', + 'pair': 'KEY/ETH', + 'direction': 'Long', + 'gain': 'loss', + 'order_rate': 3.201e-05, + 'amount': 1333.3333333333335, + 'order_type': 'market', + 'open_rate': 7.5e-05, + 'current_rate': 3.201e-05, + 'profit_amount': -0.05746268, + 'profit_ratio': -0.57405275, + 'stake_currency': 'ETH', + 'enter_tag': 'buy_signal1', + 'exit_reason': ExitType.STOP_LOSS.value, + 'open_date': arrow.utcnow().shift(days=-1, 
hours=-2, minutes=-30), + 'close_date': arrow.utcnow(), + }) + assert msg_mock.call_args[0][0] == ( + '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n' + '*Unrealized Profit:* `-57.41% (loss: -0.05746268 ETH)`\n' + '*Enter Tag:* `buy_signal1`\n' + '*Exit Reason:* `stop_loss`\n' + '*Direction:* `Long`\n' + '*Amount:* `1333.33333333`\n' + '*Open Rate:* `0.00007500`\n' + '*Current Rate:* `0.00003201`\n' + '*Exit Rate:* `0.00003201`\n' + '*Duration:* `1 day, 2:30:00 (1590.0 min)`' + ) + # Reset singleton function to avoid random breaks + telegram._rpc._fiat_converter.convert_amount = old_convamount def test_send_msg_sell_cancel_notification(default_conf, mocker) -> None: @@ -2066,7 +2067,7 @@ def test_send_msg_sell_fill_notification(default_conf, mocker, direction, default_conf['telegram']['notification_settings']['exit_fill'] = 'on' telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) - with time_machine.travel("2022-09-01 05:00:00 +00:00", tick=False) as t: + with time_machine.travel("2022-09-01 05:00:00 +00:00", tick=False): telegram.send_msg({ 'type': RPCMessageType.EXIT_FILL, 'trade_id': 1, From 980a5a9b521d1a905a7beae383fd9ff8a8fd5302 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 10 Dec 2022 19:54:04 +0100 Subject: [PATCH 297/421] Fix docs typo --- freqtrade/plugins/pairlist/VolumePairList.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/plugins/pairlist/VolumePairList.py b/freqtrade/plugins/pairlist/VolumePairList.py index ad27a93d8..be58ec1a1 100644 --- a/freqtrade/plugins/pairlist/VolumePairList.py +++ b/freqtrade/plugins/pairlist/VolumePairList.py @@ -218,7 +218,7 @@ class VolumePairList(IPairList): else: filtered_tickers[i]['quoteVolume'] = 0 else: - # Tickers mode - filter based on incomming pairlist. + # Tickers mode - filter based on incoming pairlist. 
filtered_tickers = [v for k, v in tickers.items() if k in pairlist] if self._min_value > 0: From 6b9f3f279587e1097915732ad3ac6e69c00c9bb5 Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 11 Dec 2022 13:24:24 +0300 Subject: [PATCH 298/421] Fix test validation --- freqtrade/configuration/config_validation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/configuration/config_validation.py b/freqtrade/configuration/config_validation.py index 7e291cb90..606f081ef 100644 --- a/freqtrade/configuration/config_validation.py +++ b/freqtrade/configuration/config_validation.py @@ -360,7 +360,7 @@ def _validate_freqai_include_timeframes(conf: Dict[str, Any]) -> None: feature_parameters = conf.get('freqai', {}).get('feature_parameters', {}) include_timeframes = [main_tf] + freqai_include_timeframes conf.get('freqai', {}).get('feature_parameters', {}) \ - .update({'include_timeframes': include_timeframes, **feature_parameters}) + .update({**feature_parameters, 'include_timeframes': include_timeframes}) def _validate_freqai_backtest(conf: Dict[str, Any]) -> None: From 85f22b5c3029a3f613d0b0da7b61eeef8f6685d5 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 11 Dec 2022 12:15:19 +0100 Subject: [PATCH 299/421] fix bug in MultiOutput* with conv_width = 1 --- freqtrade/freqai/base_models/BaseClassifierModel.py | 3 +++ freqtrade/freqai/base_models/BaseRegressionModel.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/freqtrade/freqai/base_models/BaseClassifierModel.py b/freqtrade/freqai/base_models/BaseClassifierModel.py index 17bffa85b..a5cea879f 100644 --- a/freqtrade/freqai/base_models/BaseClassifierModel.py +++ b/freqtrade/freqai/base_models/BaseClassifierModel.py @@ -95,6 +95,9 @@ class BaseClassifierModel(IFreqaiModel): self.data_cleaning_predict(dk) predictions = self.model.predict(dk.data_dictionary["prediction_features"]) + if self.CONV_WIDTH == 1: + predictions = np.reshape(predictions, (-1, len(dk.label_list))) + pred_df = DataFrame(predictions, columns=dk.label_list) predictions_prob = self.model.predict_proba(dk.data_dictionary["prediction_features"]) diff --git a/freqtrade/freqai/base_models/BaseRegressionModel.py b/freqtrade/freqai/base_models/BaseRegressionModel.py index 766579cb6..1f9b4f5a6 100644 --- a/freqtrade/freqai/base_models/BaseRegressionModel.py +++ b/freqtrade/freqai/base_models/BaseRegressionModel.py @@ -95,6 +95,9 @@ class BaseRegressionModel(IFreqaiModel): self.data_cleaning_predict(dk) predictions = self.model.predict(dk.data_dictionary["prediction_features"]) + if self.CONV_WIDTH == 1: + predictions = np.reshape(predictions, (-1, len(dk.label_list))) + pred_df = DataFrame(predictions, columns=dk.label_list) pred_df = dk.denormalize_labels_from_metadata(pred_df) From 8c7ec07951eadf53a5722fe7d7489e9a95e5ab46 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 11 Dec 2022 12:39:31 +0100 Subject: [PATCH 300/421] ensure predict_proba follows suit. 
Remove all lib specific params from example config --- config_examples/config_freqai.example.json | 1 - freqtrade/freqai/base_models/BaseClassifierModel.py | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/config_examples/config_freqai.example.json b/config_examples/config_freqai.example.json index 5e564a1fc..f58a4468b 100644 --- a/config_examples/config_freqai.example.json +++ b/config_examples/config_freqai.example.json @@ -80,7 +80,6 @@ "random_state": 1 }, "model_training_parameters": { - "n_estimators": 1000 } }, "bot_name": "", diff --git a/freqtrade/freqai/base_models/BaseClassifierModel.py b/freqtrade/freqai/base_models/BaseClassifierModel.py index a5cea879f..ffd42dd1d 100644 --- a/freqtrade/freqai/base_models/BaseClassifierModel.py +++ b/freqtrade/freqai/base_models/BaseClassifierModel.py @@ -101,6 +101,8 @@ class BaseClassifierModel(IFreqaiModel): pred_df = DataFrame(predictions, columns=dk.label_list) predictions_prob = self.model.predict_proba(dk.data_dictionary["prediction_features"]) + if self.CONV_WIDTH == 1: + predictions_prob = np.reshape(predictions_prob, (-1, len(self.model.classes_))) pred_df_prob = DataFrame(predictions_prob, columns=self.model.classes_) pred_df = pd.concat([pred_df, pred_df_prob], axis=1) From cb8fc3c8c7c392b75493d8da7f748760372040a9 Mon Sep 17 00:00:00 2001 From: initrv Date: Sun, 11 Dec 2022 15:37:45 +0300 Subject: [PATCH 301/421] custom info to tensorboard_metrics --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 1 + freqtrade/freqai/RL/BaseEnvironment.py | 8 ++++---- freqtrade/freqai/RL/TensorboardCallback.py | 8 ++++---- .../freqai/prediction_models/ReinforcementLearner.py | 6 ------ 5 files changed, 10 insertions(+), 15 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 79616d778..02e182bbd 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -46,9 +46,9 @@ class Base4ActionRLEnv(BaseEnvironment): self._done = True self._update_unrealized_total_profit() - step_reward = self.calculate_reward(action) self.total_reward += step_reward + self.tensorboard_metrics[self.actions._member_names_[action]] += 1 trade_type = None if self.is_tradesignal(action): diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 1c09f9386..baf7dde9f 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -49,6 +49,7 @@ class Base5ActionRLEnv(BaseEnvironment): self._update_unrealized_total_profit() step_reward = self.calculate_reward(action) self.total_reward += step_reward + self.tensorboard_metrics[self.actions._member_names_[action]] += 1 trade_type = None if self.is_tradesignal(action): diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 71b423844..0da13db7c 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -77,7 +77,7 @@ class BaseEnvironment(gym.Env): # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions - self.custom_info: dict = {} + self.tensorboard_metrics: dict = {} def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): @@ -136,10 +136,10 @@ class BaseEnvironment(gym.Env): """ Reset is called at the beginning of every episode """ - # custom_info is used for episodic reports and tensorboard logging 
- self.custom_info: dict = {} + # tensorboard_metrics is used for episodic reports and tensorboard logging + self.tensorboard_metrics: dict = {} for action in self.actions: - self.custom_info[action.name] = 0 + self.tensorboard_metrics[action.name] = 0 self._done = False diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py index d03c040d4..b596742e9 100644 --- a/freqtrade/freqai/RL/TensorboardCallback.py +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -44,16 +44,16 @@ class TensorboardCallback(BaseCallback): def _on_step(self) -> bool: local_info = self.locals["infos"][0] - custom_info = self.training_env.get_attr("custom_info")[0] + tensorboard_metrics = self.training_env.get_attr("tensorboard_metrics")[0] for info in local_info: if info not in ["episode", "terminal_observation"]: self.logger.record(f"_info/{info}", local_info[info]) - for info in custom_info: + for info in tensorboard_metrics: if info in [action.name for action in self.actions]: - self.logger.record(f"_actions/{info}", custom_info[info]) + self.logger.record(f"_actions/{info}", tensorboard_metrics[info]) else: - self.logger.record(f"_custom/{info}", custom_info[info]) + self.logger.record(f"_custom/{info}", tensorboard_metrics[info]) return True diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 1383ad15e..e015b138a 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -108,15 +108,12 @@ class ReinforcementLearner(BaseReinforcementLearningModel): # reward agent for entering trades if (action == Actions.Long_enter.value and self._position == Positions.Neutral): - self.custom_info[Actions.Long_enter.name] += 1 return 25 if (action == Actions.Short_enter.value and self._position == Positions.Neutral): - self.custom_info[Actions.Short_enter.name] += 1 return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: - self.custom_info[Actions.Neutral.name] += 1 return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) @@ -130,21 +127,18 @@ class ReinforcementLearner(BaseReinforcementLearningModel): # discourage sitting in position if (self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value): - self.custom_info[Actions.Neutral.name] += 1 return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - self.custom_info[Actions.Long_exit.name] += 1 return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - self.custom_info[Actions.Short_exit.name] += 1 return float(pnl * factor) return 0. 
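To make the data flow this change settles on easier to follow — a plain dictionary accumulated inside the environment each step and drained by the callback at reporting time — here is a minimal, dependency-free sketch. The `record()` helper, the `EpisodeMetrics` class and the example counters are stand-ins invented for illustration; only the action names and the `_actions/`/`_custom/` routing come from the diff above.

```python
# Illustrative sketch of the accumulate-then-record pattern (not freqtrade code).
from typing import Dict, Union

ACTION_NAMES = {"Neutral", "Long_enter", "Long_exit", "Short_enter", "Short_exit"}


def record(key: str, value: Union[int, float]) -> None:
    # Stand-in for `self.logger.record(key, value)` inside TensorboardCallback.
    print(f"{key} = {value}")


class EpisodeMetrics:
    """Per-episode counters, reset to an empty dict at the start of every episode."""

    def __init__(self) -> None:
        self.tensorboard_metrics: Dict[str, Union[int, float]] = {}

    def bump(self, metric: str, value: Union[int, float] = 1) -> None:
        # Mirrors the per-step accumulation done inside the environment.
        self.tensorboard_metrics[metric] = self.tensorboard_metrics.get(metric, 0) + value

    def flush(self) -> None:
        # Action counters are routed under `_actions/`, everything else under
        # `_custom/`, matching the routing in TensorboardCallback above.
        for name, value in self.tensorboard_metrics.items():
            prefix = "_actions" if name in ACTION_NAMES else "_custom"
            record(f"{prefix}/{name}", value)


metrics = EpisodeMetrics()
metrics.bump("Long_enter")  # e.g. one increment per step that entered a long
metrics.bump("is_valid")    # e.g. a custom counter bumped from calculate_reward()
metrics.flush()
```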
From 0fd8e214e4f95a4c2c1929e9b26da43c70fd47dc Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 11 Dec 2022 15:31:29 +0100 Subject: [PATCH 302/421] add documentation for tensorboard_log, change how users interact with tensorboard_log --- docs/freqai-reinforcement-learning.md | 26 +++++++++++++++ freqtrade/freqai/RL/Base4ActionRLEnv.py | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 +- freqtrade/freqai/RL/BaseEnvironment.py | 33 ++++++++++++++++--- .../prediction_models/ReinforcementLearner.py | 1 + 5 files changed, 57 insertions(+), 7 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index b1a212a92..b831c90a0 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -247,6 +247,32 @@ where `unique-id` is the `identifier` set in the `freqai` configuration file. Th ![tensorboard](assets/tensorboard.jpg) + +### Custom logging + +FreqAI also provides a built in episodic summary logger called `self.tensorboard_log` for adding custom information to the Tensorboard log. By default, this function is already called once per step inside the environment to record the agent actions. All values accumulated for all steps in a single episode are reported at the conclusion of each episode, followed by a full reset of all metrics to 0 in preparation for the subsequent episode. + + +`self.tensorboard_log` can also be used anywhere inside the environment, for example, it can be added to the `calculate_reward` function to collect more detailed information about how often various parts of the reward were called: + +```py + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. + """ + def calculate_reward(self, action: int) -> float: + if not self._is_valid(action): + self.tensorboard_log("is_valid") + return -2 + +``` + +!!! Note + The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. `self.tensorboard_log("float_metric1", 0.23)` would add 0.23 to `float_metric`. + + ### Choosing a base environment FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. 
The main changes stemming from the environment selection include: diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 02e182bbd..a3ebfdbfa 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -48,7 +48,7 @@ class Base4ActionRLEnv(BaseEnvironment): self._update_unrealized_total_profit() step_reward = self.calculate_reward(action) self.total_reward += step_reward - self.tensorboard_metrics[self.actions._member_names_[action]] += 1 + self.tensorboard_log(self.actions._member_names_[action]) trade_type = None if self.is_tradesignal(action): diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index baf7dde9f..22d3cae30 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -49,7 +49,7 @@ class Base5ActionRLEnv(BaseEnvironment): self._update_unrealized_total_profit() step_reward = self.calculate_reward(action) self.total_reward += step_reward - self.tensorboard_metrics[self.actions._member_names_[action]] += 1 + self.tensorboard_log(self.actions._member_names_[action]) trade_type = None if self.is_tradesignal(action): diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 0da13db7c..a5cee4def 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -2,7 +2,7 @@ import logging import random from abc import abstractmethod from enum import Enum -from typing import Optional, Type +from typing import Optional, Type, Union import gym import numpy as np @@ -132,14 +132,37 @@ class BaseEnvironment(gym.Env): self.np_random, seed = seeding.np_random(seed) return [seed] + def tensorboard_log(self, metric: str, inc: Union[int, float] = 1): + """ + Function builds the tensorboard_metrics dictionary + to be parsed by the TensorboardCallback. This + function is designed for tracking incremented objects, + events, actions inside the training environment. 
+ For example, a user can call this to track the + frequency of occurence of an `is_valid` call in + their `calculate_reward()`: + + def calculate_reward(self, action: int) -> float: + if not self._is_valid(action): + self.tensorboard_log("is_valid") + return -2 + + :param metric: metric to be tracked and incremented + :param inc: value to increment `metric` by + """ + if metric not in self.tensorboard_metrics: + self.tensorboard_metrics[metric] = inc + else: + self.tensorboard_metrics[metric] += inc + + def reset_tensorboard_log(self): + self.tensorboard_metrics = {} + def reset(self): """ Reset is called at the beginning of every episode """ - # tensorboard_metrics is used for episodic reports and tensorboard logging - self.tensorboard_metrics: dict = {} - for action in self.actions: - self.tensorboard_metrics[action.name] = 0 + self.reset_tensorboard_log() self._done = False diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index e015b138a..38ea67e69 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -100,6 +100,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ # first, penalize if the action is not valid if not self._is_valid(action): + self.tensorboard_log("is_valid") return -2 pnl = self.get_unrealized_profit() From 78c40f0535617fc29047262719877e6b151075d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:00:40 +0000 Subject: [PATCH 303/421] Bump scikit-learn from 1.1.3 to 1.2.0 Bumps [scikit-learn](https://github.com/scikit-learn/scikit-learn) from 1.1.3 to 1.2.0. - [Release notes](https://github.com/scikit-learn/scikit-learn/releases) - [Commits](https://github.com/scikit-learn/scikit-learn/compare/1.1.3...1.2.0) --- updated-dependencies: - dependency-name: scikit-learn dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements-freqai.txt | 2 +- requirements-hyperopt.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 66730e29f..5eafc497b 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -3,7 +3,7 @@ -r requirements-plot.txt # Required for freqai -scikit-learn==1.1.3 +scikit-learn==1.2.0 joblib==1.2.0 catboost==1.1.1; platform_machine != 'aarch64' lightgbm==3.3.3 diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index 4f59ad1fa..83ba62240 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -3,7 +3,7 @@ # Required for hyperopt scipy==1.9.3 -scikit-learn==1.1.3 +scikit-learn==1.2.0 scikit-optimize==0.9.0 filelock==3.8.0 progressbar2==4.2.0 From 434eec73341f9b38e34517b2a63d5125d94eeddb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:00:46 +0000 Subject: [PATCH 304/421] Bump blosc from 1.10.6 to 1.11.0 Bumps [blosc](https://github.com/blosc/python-blosc) from 1.10.6 to 1.11.0. - [Release notes](https://github.com/blosc/python-blosc/releases) - [Changelog](https://github.com/Blosc/python-blosc/blob/main/RELEASE_NOTES.rst) - [Commits](https://github.com/blosc/python-blosc/compare/v1.10.6...v1.11.0) --- updated-dependencies: - dependency-name: blosc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 313e0ff9c..4bd527c90 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,7 +20,7 @@ tabulate==0.9.0 pycoingecko==3.1.0 jinja2==3.1.2 tables==3.7.0 -blosc==1.10.6 +blosc==1.11.0 joblib==1.2.0 pyarrow==10.0.1; platform_machine != 'armv7l' From 63d3a9ced66ecccafd77ec57f20a48b0c427993a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:00:49 +0000 Subject: [PATCH 305/421] Bump prompt-toolkit from 3.0.33 to 3.0.36 Bumps [prompt-toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit) from 3.0.33 to 3.0.36. - [Release notes](https://github.com/prompt-toolkit/python-prompt-toolkit/releases) - [Changelog](https://github.com/prompt-toolkit/python-prompt-toolkit/blob/master/CHANGELOG) - [Commits](https://github.com/prompt-toolkit/python-prompt-toolkit/compare/3.0.33...3.0.36) --- updated-dependencies: - dependency-name: prompt-toolkit dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 313e0ff9c..1bdcc82ba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -47,7 +47,7 @@ psutil==5.9.4 colorama==0.4.6 # Building config files interactively questionary==1.10.0 -prompt-toolkit==3.0.33 +prompt-toolkit==3.0.36 # Extensions to datetime library python-dateutil==2.8.2 From a35111e55e55046504a922a952f90e091f28d49d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:00:54 +0000 Subject: [PATCH 306/421] Bump nbconvert from 7.2.5 to 7.2.6 Bumps [nbconvert](https://github.com/jupyter/nbconvert) from 7.2.5 to 7.2.6. - [Release notes](https://github.com/jupyter/nbconvert/releases) - [Changelog](https://github.com/jupyter/nbconvert/blob/main/CHANGELOG.md) - [Commits](https://github.com/jupyter/nbconvert/compare/v7.2.5...v7.2.6) --- updated-dependencies: - dependency-name: nbconvert dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 463d2656a..e36419f6c 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -23,7 +23,7 @@ time-machine==2.8.2 httpx==0.23.1 # Convert jupyter notebooks to markdown documents -nbconvert==7.2.5 +nbconvert==7.2.6 # mypy types types-cachetools==5.2.1 From 56256480115c142b00587c6734b78092d4027c3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:01:07 +0000 Subject: [PATCH 307/421] Bump pytest-asyncio from 0.20.2 to 0.20.3 Bumps [pytest-asyncio](https://github.com/pytest-dev/pytest-asyncio) from 0.20.2 to 0.20.3. - [Release notes](https://github.com/pytest-dev/pytest-asyncio/releases) - [Changelog](https://github.com/pytest-dev/pytest-asyncio/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest-asyncio/compare/v0.20.2...v0.20.3) --- updated-dependencies: - dependency-name: pytest-asyncio dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 463d2656a..843337c9b 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -12,7 +12,7 @@ flake8-tidy-imports==4.8.0 mypy==0.991 pre-commit==2.20.0 pytest==7.2.0 -pytest-asyncio==0.20.2 +pytest-asyncio==0.20.3 pytest-cov==4.0.0 pytest-mock==3.10.0 pytest-random-order==1.1.0 From 5a7b493d3ec9ccd557071ad70963041b31417bf7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:01:11 +0000 Subject: [PATCH 308/421] Bump xgboost from 1.7.1 to 1.7.2 Bumps [xgboost](https://github.com/dmlc/xgboost) from 1.7.1 to 1.7.2. - [Release notes](https://github.com/dmlc/xgboost/releases) - [Changelog](https://github.com/dmlc/xgboost/blob/master/NEWS.md) - [Commits](https://github.com/dmlc/xgboost/compare/v1.7.1...v1.7.2) --- updated-dependencies: - dependency-name: xgboost dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-freqai.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 66730e29f..215a312bf 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -7,5 +7,5 @@ scikit-learn==1.1.3 joblib==1.2.0 catboost==1.1.1; platform_machine != 'aarch64' lightgbm==3.3.3 -xgboost==1.7.1 +xgboost==1.7.2 tensorboard==2.11.0 From 0344203372c84be49a9bd7d3d55c3b3456ce877a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:01:32 +0000 Subject: [PATCH 309/421] Bump sqlalchemy from 1.4.44 to 1.4.45 Bumps [sqlalchemy](https://github.com/sqlalchemy/sqlalchemy) from 1.4.44 to 1.4.45. - [Release notes](https://github.com/sqlalchemy/sqlalchemy/releases) - [Changelog](https://github.com/sqlalchemy/sqlalchemy/blob/main/CHANGES.rst) - [Commits](https://github.com/sqlalchemy/sqlalchemy/commits) --- updated-dependencies: - dependency-name: sqlalchemy dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 313e0ff9c..b36225aa6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ ccxt==2.2.67 cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.4; platform_machine != 'armv7l' aiohttp==3.8.3 -SQLAlchemy==1.4.44 +SQLAlchemy==1.4.45 python-telegram-bot==13.14 arrow==1.2.3 cachetools==4.2.2 From 2647c35f485406e50c6f8539510e66c83e20cc5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 03:02:53 +0000 Subject: [PATCH 310/421] Bump pypa/gh-action-pypi-publish from 1.6.1 to 1.6.4 Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.6.1 to 1.6.4. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.6.1...v1.6.4) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 273fb7ea0..b15451a64 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -410,7 +410,7 @@ jobs: python setup.py sdist bdist_wheel - name: Publish to PyPI (Test) - uses: pypa/gh-action-pypi-publish@v1.6.1 + uses: pypa/gh-action-pypi-publish@v1.6.4 if: (github.event_name == 'release') with: user: __token__ @@ -418,7 +418,7 @@ jobs: repository_url: https://test.pypi.org/legacy/ - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.6.1 + uses: pypa/gh-action-pypi-publish@v1.6.4 if: (github.event_name == 'release') with: user: __token__ From bc2b9981d3dbc782f20ed6730f8f712e02d61594 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 05:30:55 +0000 Subject: [PATCH 311/421] Bump python-telegram-bot from 13.14 to 13.15 Bumps [python-telegram-bot](https://github.com/python-telegram-bot/python-telegram-bot) from 13.14 to 13.15. - [Release notes](https://github.com/python-telegram-bot/python-telegram-bot/releases) - [Changelog](https://github.com/python-telegram-bot/python-telegram-bot/blob/v13.15/CHANGES.rst) - [Commits](https://github.com/python-telegram-bot/python-telegram-bot/compare/v13.14...v13.15) --- updated-dependencies: - dependency-name: python-telegram-bot dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b36225aa6..3b572cce6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.4; platform_machine != 'armv7l' aiohttp==3.8.3 SQLAlchemy==1.4.45 -python-telegram-bot==13.14 +python-telegram-bot==13.15 arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 From 915e0ac62f940e0cb20484d582e8527b13488d3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 05:31:01 +0000 Subject: [PATCH 312/421] Bump ccxt from 2.2.67 to 2.2.92 Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.2.67 to 2.2.92. - [Release notes](https://github.com/ccxt/ccxt/releases) - [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg) - [Commits](https://github.com/ccxt/ccxt/compare/2.2.67...2.2.92) --- updated-dependencies: - dependency-name: ccxt dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b36225aa6..fff69ffac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.23.5 pandas==1.5.2 pandas-ta==0.3.14b -ccxt==2.2.67 +ccxt==2.2.92 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.4; platform_machine != 'armv7l' From de9784267a361ebb541fde4afaa23c8c6310a1fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 05:39:16 +0000 Subject: [PATCH 313/421] Bump filelock from 3.8.0 to 3.8.2 Bumps [filelock](https://github.com/tox-dev/py-filelock) from 3.8.0 to 3.8.2. 
- [Release notes](https://github.com/tox-dev/py-filelock/releases) - [Changelog](https://github.com/tox-dev/py-filelock/blob/main/docs/changelog.rst) - [Commits](https://github.com/tox-dev/py-filelock/compare/3.8.0...3.8.2) --- updated-dependencies: - dependency-name: filelock dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-hyperopt.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index 83ba62240..8fc58812b 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -5,5 +5,5 @@ scipy==1.9.3 scikit-learn==1.2.0 scikit-optimize==0.9.0 -filelock==3.8.0 +filelock==3.8.2 progressbar2==4.2.0 From f6b90595fae9a24cd0f2a3a3e83d824bf597e129 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Mon, 12 Dec 2022 11:05:03 +0100 Subject: [PATCH 314/421] remove html. change var names. --- freqtrade/plugins/pairlist/RemotePairList.py | 53 +++++++++----------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 7367f713c..ef5463a56 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -6,7 +6,7 @@ Provides pair list fetched from a remote source import json import logging from pathlib import Path -from typing import Any, Dict, List +from typing import Any, Dict, List, Tuple import requests from cachetools import TTLCache @@ -60,7 +60,7 @@ class RemotePairList(IPairList): """ return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." - def fetch_pairlist(self): + def fetch_pairlist(self) -> Tuple[List[str], float, str]: headers = { 'User-Agent': 'Freqtrade - Remotepairlist', } @@ -68,40 +68,35 @@ class RemotePairList(IPairList): info = "Pairlist" try: - response = requests.get(self._pairlist_url, headers=headers, - timeout=self._read_timeout) - content_type = response.headers.get('content-type') - time_elapsed = response.elapsed.total_seconds() + with requests.get(self._pairlist_url, headers=headers, + timeout=self._read_timeout) as response: + content_type = response.headers.get('content-type') + time_elapsed = response.elapsed.total_seconds() - rsplit = response.text.split("#") - - if "text/html" in str(content_type): - if len(rsplit) > 1: - plist = rsplit[0].strip() - plist = json.loads(plist) - info = rsplit[1].strip() + if "application/json" in str(content_type): + jsonparse = response.json() + pairlist = jsonparse['pairs'] + info = jsonparse.get('info', '') else: - plist = json.loads(rsplit[0]) - elif "application/json" in str(content_type): - jsonp = response.json() - plist = jsonp['pairs'] + raise OperationalException( + 'Remotepairlist is not of type JSON abort') - info = jsonp.get('info', '') - self._refresh_period = jsonp.get('refresh_period', self._refresh_period) + self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) + self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) except requests.exceptions.RequestException: self.log_once(f'Was not able to fetch pairlist from:' f' {self._pairlist_url}', logger.info) if self._keep_pairlist_on_failure: - plist = str(self._last_pairlist) + pairlist = self._last_pairlist self.log_once('Keeping last fetched pairlist', logger.info) else: - plist = "" + pairlist = [] time_elapsed = 0 - return plist, time_elapsed, info + return pairlist, time_elapsed, info def 
gen_pairlist(self, tickers: Tickers) -> List[str]: """ @@ -111,7 +106,7 @@ class RemotePairList(IPairList): """ pairlist = self._pair_cache.get('pairlist') - time_elapsed = 0 + time_elapsed = 0.0 if pairlist: # Item found - no refresh necessary @@ -124,11 +119,11 @@ class RemotePairList(IPairList): if file_path.exists(): with open(filename) as json_file: # Load the JSON data into a dictionary - jsonp = json.load(json_file) - pairlist = jsonp['pairs'] - - info = jsonp.get('info', '') - self._refresh_period = jsonp.get('refresh_period', self._refresh_period) + jsonparse = json.load(json_file) + pairlist = jsonparse['pairs'] + info = jsonparse.get('info', '') + self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) + self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) else: raise ValueError(f"{self._pairlist_url} does not exist.") @@ -139,7 +134,7 @@ class RemotePairList(IPairList): pairlist = self.filter_pairlist(pairlist, tickers) self._pair_cache['pairlist'] = pairlist.copy() - if time_elapsed: + if time_elapsed != 0.0: self.log_once(f'{info} Fetched in {time_elapsed} seconds.', logger.info) else: self.log_once(f'{info} Fetched Pairlist.', logger.info) From f9b7d35900b50cc786f8fee4943d5e301e3123b8 Mon Sep 17 00:00:00 2001 From: initrv Date: Mon, 12 Dec 2022 14:14:23 +0300 Subject: [PATCH 315/421] add increment param for tensorboard_log --- freqtrade/freqai/RL/BaseEnvironment.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 5a90d381e..5a5a950e7 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -139,7 +139,7 @@ class BaseEnvironment(gym.Env): self.np_random, seed = seeding.np_random(seed) return [seed] - def tensorboard_log(self, metric: str, inc: Union[int, float] = 1): + def tensorboard_log(self, metric: str, value: Union[int, float] = 1, inc: bool = True): """ Function builds the tensorboard_metrics dictionary to be parsed by the TensorboardCallback. This @@ -155,12 +155,13 @@ class BaseEnvironment(gym.Env): return -2 :param metric: metric to be tracked and incremented - :param inc: value to increment `metric` by + :param value: value to increment `metric` by + :param inc: sets whether the `value` is incremented or not """ - if metric not in self.tensorboard_metrics: - self.tensorboard_metrics[metric] = inc + if not inc or metric not in self.tensorboard_metrics: + self.tensorboard_metrics[metric] = value else: - self.tensorboard_metrics[metric] += inc + self.tensorboard_metrics[metric] += value def reset_tensorboard_log(self): self.tensorboard_metrics = {} From f940280d5e82d3574628af99f29d1fa0e2dd695a Mon Sep 17 00:00:00 2001 From: initrv Date: Mon, 12 Dec 2022 14:35:44 +0300 Subject: [PATCH 316/421] Fix tensorboard_log incrementing note --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index b831c90a0..f3d6c97f8 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -270,7 +270,7 @@ FreqAI also provides a built in episodic summary logger called `self.tensorboard ``` !!! Note - The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. 
`self.tensorboard_log("float_metric1", 0.23)` would add 0.23 to `float_metric`. + The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. `self.tensorboard_log("float_metric1", 0.23)` would add 0.23 to `float_metric`. In this case you can also disable incrementing using `inc=False` parameter. ### Choosing a base environment From 6f92c58e3317773a2ffedaf33e9f465d358ec528 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Mon, 12 Dec 2022 13:24:33 +0100 Subject: [PATCH 317/421] add docs, add bearer token. --- docs/includes/pairlists.md | 46 +++++++++++++++++++- freqtrade/plugins/pairlist/RemotePairList.py | 12 +++-- 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/docs/includes/pairlists.md b/docs/includes/pairlists.md index d61718c7d..c12683e75 100644 --- a/docs/includes/pairlists.md +++ b/docs/includes/pairlists.md @@ -2,7 +2,7 @@ Pairlist Handlers define the list of pairs (pairlist) that the bot should trade. They are configured in the `pairlists` section of the configuration settings. -In your configuration, you can use Static Pairlist (defined by the [`StaticPairList`](#static-pair-list) Pairlist Handler) and Dynamic Pairlist (defined by the [`VolumePairList`](#volume-pair-list) Pairlist Handler). +In your configuration, you can use Static Pairlist (defined by the [`StaticPairList`](#static-pair-list) Pairlist Handler), Dynamic Pairlist (defined by the [`VolumePairList`](#volume-pair-list) Pairlist Handler). Additionally, [`AgeFilter`](#agefilter), [`PrecisionFilter`](#precisionfilter), [`PriceFilter`](#pricefilter), [`ShuffleFilter`](#shufflefilter), [`SpreadFilter`](#spreadfilter) and [`VolatilityFilter`](#volatilityfilter) act as Pairlist Filters, removing certain pairs and/or moving their positions in the pairlist. @@ -23,6 +23,7 @@ You may also use something like `.*DOWN/BTC` or `.*UP/BTC` to exclude leveraged * [`StaticPairList`](#static-pair-list) (default, if not configured differently) * [`VolumePairList`](#volume-pair-list) * [`ProducerPairList`](#producerpairlist) +* [`RemotePairList`](#remotepairlist) * [`AgeFilter`](#agefilter) * [`OffsetFilter`](#offsetfilter) * [`PerformanceFilter`](#performancefilter) @@ -173,6 +174,49 @@ You can limit the length of the pairlist with the optional parameter `number_ass `ProducerPairList` can also be used multiple times in sequence, combining the pairs from multiple producers. Obviously in complex such configurations, the Producer may not provide data for all pairs, so the strategy must be fit for this. +#### RemotePairList + +It allows the user to fetch a pairlist from a remote server or a locally stored json file within the freqtrade directory, enabling dynamic updates and customization of the trading pairlist. + +The RemotePairList is defined in the pairlists section of the configuration settings. It uses the following configuration options: + +```json +"pairlists": [ + { + "method": "RemotePairList", + "pairlist_url": "https://example.com/pairlist", + "number_assets": 10, + "refresh_period": 1800, + "keep_pairlist_on_failure": true, + "read_timeout": 60, + "bearer_token": "my-bearer-token" + } +] +``` + +The `pairlist_url` option specifies the URL of the remote server where the pairlist is located, or the path to a local file (if file:/// is prepended). This allows the user to use either a remote server or a local file as the source for the pairlist. 
+ +The user is responsible for providing a server or local file that returns a JSON object with the following structure: + +```json +{ + "pairs": ["XRP/USDT", "ETH/USDT", "LTC/USDT"], + "refresh_period": 1800, + "info": "Pairlist updated on 2022-12-12 at 12:12" +} +``` + +The `pairs` property should contain a list of strings with the trading pairs to be used by the bot. The `refresh_period` property is optional and specifies the number of seconds that the pairlist should be cached before being refreshed. The `info` property is also optional and can be used to provide any additional information about the pairlist. + +The optional `keep_pairlist_on_failure` specifies whether the previous received pairlist should be used if the remote server is not reachable or returns an error. The default value is true. + +The optional `read_timeout` specifies the maximum amount of time (in seconds) to wait for a response from the remote source, The default value is 60. + +The optional `bearer_token` will be included in the requests Authorization Header. + +!!! Note + In case of a server error the last received pairlist will be kept if `keep_pairlist_on_failure` is set to true, when set to false a empty pairlist is returned. + #### AgeFilter Removes pairs that have been listed on the exchange for less than `min_days_listed` days (defaults to `10`) or more than `max_days_listed` days (defaults `None` mean infinity). diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index ef5463a56..7ef038da7 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -11,6 +11,7 @@ from typing import Any, Dict, List, Tuple import requests from cachetools import TTLCache +from freqtrade import __version__ from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Tickers @@ -43,6 +44,7 @@ class RemotePairList(IPairList): self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) self._pairlist_url = self._pairlistconfig.get('pairlist_url', '') self._read_timeout = self._pairlistconfig.get('read_timeout', 60) + self._bearer_token = self._pairlistconfig.get('bearer_token', '') self._last_pairlist: List[Any] = list() @property @@ -61,10 +63,14 @@ class RemotePairList(IPairList): return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." 
def fetch_pairlist(self) -> Tuple[List[str], float, str]: + headers = { - 'User-Agent': 'Freqtrade - Remotepairlist', + 'User-Agent': 'Freqtrade/' + __version__ + ' Remotepairlist' } + if self._bearer_token: + headers['Authorization'] = f'Bearer {self._bearer_token}' + info = "Pairlist" try: @@ -76,7 +82,7 @@ class RemotePairList(IPairList): if "application/json" in str(content_type): jsonparse = response.json() pairlist = jsonparse['pairs'] - info = jsonparse.get('info', '') + info = jsonparse.get('info', '')[:1000] else: raise OperationalException( 'Remotepairlist is not of type JSON abort') @@ -121,7 +127,7 @@ class RemotePairList(IPairList): # Load the JSON data into a dictionary jsonparse = json.load(json_file) pairlist = jsonparse['pairs'] - info = jsonparse.get('info', '') + info = jsonparse.get('info', '')[:1000] self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) From 5c984bf5c23d8e14e7d79f7a12848225f93fe410 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 12 Dec 2022 21:33:12 +0300 Subject: [PATCH 318/421] Temporarily downgrade blosc for arm64 --- requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5aba43edf..37f1d31e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,7 +20,8 @@ tabulate==0.9.0 pycoingecko==3.1.0 jinja2==3.1.2 tables==3.7.0 -blosc==1.11.0 +blosc==1.10.6; platform_machine == 'arm64' +blosc==1.11.0; platform_machine != 'arm64' joblib==1.2.0 pyarrow==10.0.1; platform_machine != 'armv7l' From abc3badfb53cf3c5ba56258cd18c0c94517ab8e7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 12 Dec 2022 20:01:54 +0100 Subject: [PATCH 319/421] Improve shutdown behavior closes #7882 --- freqtrade/freqtradebot.py | 9 ++++++++- tests/test_freqtradebot.py | 12 ++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index f9cb28c28..f6c4a52bb 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -155,6 +155,8 @@ class FreqtradeBot(LoggingMixin): self.cancel_all_open_orders() self.check_for_open_trades() + except Exception as e: + logger.warning(f'Exception during cleanup: {e.__class__.__name__} {e}') finally: self.strategy.ft_bot_cleanup() @@ -162,8 +164,13 @@ class FreqtradeBot(LoggingMixin): self.rpc.cleanup() if self.emc: self.emc.shutdown() - Trade.commit() self.exchange.close() + try: + Trade.commit() + except Exception: + # Exeptions here will be happening if the db disappeared. + # At which point we can no longer commit anyway. 
+ pass def startup(self) -> None: """ diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index b71b5b387..faaefcafb 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -88,6 +88,18 @@ def test_bot_cleanup(mocker, default_conf_usdt, caplog) -> None: assert coo_mock.call_count == 1 +def test_bot_cleanup_db_errors(mocker, default_conf_usdt, caplog) -> None: + mocker.patch('freqtrade.freqtradebot.Trade.commit', + side_effect=OperationalException()) + mocker.patch('freqtrade.freqtradebot.FreqtradeBot.check_for_open_trades', + side_effect=OperationalException()) + freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) + freqtrade.emc = MagicMock() + freqtrade.emc.shutdown = MagicMock() + freqtrade.cleanup() + assert freqtrade.emc.shutdown.call_count == 1 + + @pytest.mark.parametrize('runmode', [ RunMode.DRY_RUN, RunMode.LIVE From 9660e445b89c15c732b276d380f3ef1a27618d46 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 18:09:47 -0700 Subject: [PATCH 320/421] use new channel apis in emc, extend analyzed df to include list of dates for candles --- freqtrade/data/dataprovider.py | 78 ++++++++++++- freqtrade/rpc/api_server/ws_schemas.py | 2 +- freqtrade/rpc/external_message_consumer.py | 128 ++++++++++++++++----- freqtrade/rpc/rpc.py | 46 ++++++-- 4 files changed, 212 insertions(+), 42 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 6b220c8b4..d6eb217a8 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -9,7 +9,7 @@ from collections import deque from datetime import datetime, timezone from typing import Any, Dict, List, Optional, Tuple -from pandas import DataFrame +from pandas import DataFrame, concat, date_range from freqtrade.configuration import TimeRange from freqtrade.constants import Config, ListPairsWithTimeframes, PairWithTimeframe @@ -120,7 +120,7 @@ class DataProvider: 'type': RPCMessageType.ANALYZED_DF, 'data': { 'key': pair_key, - 'df': dataframe, + 'df': dataframe.tail(1), 'la': datetime.now(timezone.utc) } } @@ -157,6 +157,80 @@ class DataProvider: self.__producer_pairs_df[producer_name][pair_key] = (dataframe, _last_analyzed) logger.debug(f"External DataFrame for {pair_key} from {producer_name} added.") + def _add_external_candle( + self, + pair: str, + dataframe: DataFrame, + last_analyzed: datetime, + timeframe: str, + candle_type: CandleType, + producer_name: str = "default" + ) -> Tuple[bool, Optional[List[str]]]: + """ + Append a candle to the existing external dataframe + + :param pair: pair to get the data for + :param timeframe: Timeframe to get data for + :param candle_type: Any of the enum CandleType (must match trading mode!) + :returns: A tuple with a boolean value signifying if the candle was correctly appended, + and a list of datetimes missing from the candle if it finds some. + Will return false if has no data for `producer_name`. + Will return false if no existing data for (pair, timeframe, candle_type). + Will return false if there's missing candles, and a list of datetimes of + the missing candles. 
+ """ + pair_key = (pair, timeframe, candle_type) + + if producer_name not in self.__producer_pairs_df: + # We don't have data from this producer yet, + # so we can't append a candle + return (False, None) + + if pair_key not in self.__producer_pairs_df[producer_name]: + # We don't have data for this pair_key, + # so we can't append a candle + return (False, None) + + # CHECK FOR MISSING CANDLES + + existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] + appended_df = self._append_candle_to_dataframe(existing_df, dataframe) + + # Everything is good, we appended + self.__producer_pairs_df[producer_name][pair_key] = appended_df, last_analyzed + return (True, None) + + def _append_candle_to_dataframe(self, existing: DataFrame, new: DataFrame) -> DataFrame: + """ + Append the `new` dataframe to the `existing` dataframe + + :param existing: The full dataframe you want appended to + :param new: The new dataframe containing the data you want appended + :returns: The dataframe with the new data in it + """ + if existing.iloc[-1]['date'] != new.iloc[-1]['date']: + existing = concat([existing, new]) + + # Only keep the last 1000 candles in memory + # TODO: Do this better + existing = existing[-1000:] if len(existing) > 1000 else existing + + return existing + + def _is_missing_candles(self, dataframe: DataFrame) -> bool: + """ + Check if the dataframe is missing any candles + + :param dataframe: The DataFrame to check + """ + logger.info(dataframe.index) + return len( + date_range( + dataframe.index.min(), + dataframe.index.max() + ).difference(dataframe.index) + ) > 0 + def get_producer_df( self, pair: str, diff --git a/freqtrade/rpc/api_server/ws_schemas.py b/freqtrade/rpc/api_server/ws_schemas.py index 877232213..292672b60 100644 --- a/freqtrade/rpc/api_server/ws_schemas.py +++ b/freqtrade/rpc/api_server/ws_schemas.py @@ -47,7 +47,7 @@ class WSWhitelistRequest(WSRequestSchema): class WSAnalyzedDFRequest(WSRequestSchema): type: RPCRequestType = RPCRequestType.ANALYZED_DF - data: Dict[str, Any] = {"limit": 1500} + data: Dict[str, Any] = {"limit": 1500, "pair": None} # ------------------------------ MESSAGE SCHEMAS ---------------------------- diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 6078efd07..24731ef4f 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -8,7 +8,7 @@ import asyncio import logging import socket from threading import Thread -from typing import TYPE_CHECKING, Any, Callable, Dict, List, TypedDict +from typing import TYPE_CHECKING, Any, Callable, Dict, List, TypedDict, Union import websockets from pydantic import ValidationError @@ -16,7 +16,8 @@ from pydantic import ValidationError from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RPCMessageType from freqtrade.misc import remove_entry_exit_signals -from freqtrade.rpc.api_server.ws import WebSocketChannel +from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel +from freqtrade.rpc.api_server.ws.message_stream import MessageStream from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSAnalyzedDFRequest, WSMessageSchema, WSRequestSchema, WSSubscribeRequest, WSWhitelistMessage, @@ -38,6 +39,14 @@ class Producer(TypedDict): logger = logging.getLogger(__name__) +def schema_to_dict(schema: Union[WSMessageSchema, WSRequestSchema]): + return schema.dict(exclude_none=True) + + +# def parse_message(message: Dict[str, Any], message_schema: 
Type[WSMessageSchema]): +# return message_schema.parse_obj(message) + + class ExternalMessageConsumer: """ The main controller class for consuming external messages from @@ -92,6 +101,8 @@ class ExternalMessageConsumer: RPCMessageType.ANALYZED_DF: self._consume_analyzed_df_message, } + self._channel_streams: Dict[str, MessageStream] = {} + self.start() def start(self): @@ -118,6 +129,8 @@ class ExternalMessageConsumer: logger.info("Stopping ExternalMessageConsumer") self._running = False + self._channel_streams = {} + if self._sub_tasks: # Cancel sub tasks for task in self._sub_tasks: @@ -175,7 +188,6 @@ class ExternalMessageConsumer: :param producer: Dictionary containing producer info :param lock: An asyncio Lock """ - channel = None while self._running: try: host, port = producer['host'], producer['port'] @@ -190,19 +202,17 @@ class ExternalMessageConsumer: max_size=self.message_size_limit, ping_interval=None ) as ws: - channel = WebSocketChannel(ws, channel_id=name) + async with create_channel(ws, channel_id=name) as channel: - logger.info(f"Producer connection success - {channel}") + # Create the message stream for this channel + self._channel_streams[name] = MessageStream() - # Now request the initial data from this Producer - for request in self._initial_requests: - await channel.send( - request.dict(exclude_none=True) + # Run the channel tasks while connected + await channel.run_channel_tasks( + self._receive_messages(channel, producer, lock), + self._send_requests(channel, self._channel_streams[name]) ) - # Now receive data, if none is within the time limit, ping - await self._receive_messages(channel, producer, lock) - except (websockets.exceptions.InvalidURI, ValueError) as e: logger.error(f"{ws_url} is an invalid WebSocket URL - {e}") break @@ -214,26 +224,33 @@ class ExternalMessageConsumer: websockets.exceptions.InvalidMessage ) as e: logger.error(f"Connection Refused - {e} retrying in {self.sleep_time}s") - await asyncio.sleep(self.sleep_time) - continue except ( websockets.exceptions.ConnectionClosedError, websockets.exceptions.ConnectionClosedOK ): # Just keep trying to connect again indefinitely - await asyncio.sleep(self.sleep_time) - continue + pass except Exception as e: # An unforseen error has occurred, log and continue logger.error("Unexpected error has occurred:") logger.exception(e) - continue finally: - if channel: - await channel.close() + await asyncio.sleep(self.sleep_time) + continue + + async def _send_requests(self, channel: WebSocketChannel, channel_stream: MessageStream): + # Send the initial requests + for init_request in self._initial_requests: + await channel.send(schema_to_dict(init_request)) + + # Now send any subsequent requests published to + # this channel's stream + async for request in channel_stream: + logger.info(f"Sending request to channel - {channel} - {request}") + await channel.send(request) async def _receive_messages( self, @@ -270,20 +287,39 @@ class ExternalMessageConsumer: latency = (await asyncio.wait_for(pong, timeout=self.ping_timeout) * 1000) logger.info(f"Connection to {channel} still alive, latency: {latency}ms") - continue + except (websockets.exceptions.ConnectionClosed): # Just eat the error and continue reconnecting logger.warning(f"Disconnection in {channel} - retrying in {self.sleep_time}s") - await asyncio.sleep(self.sleep_time) - break + except Exception as e: + # Just eat the error and continue reconnecting logger.warning(f"Ping error {channel} - {e} - retrying in {self.sleep_time}s") logger.debug(e, exc_info=e) - 
await asyncio.sleep(self.sleep_time) + finally: + await asyncio.sleep(self.sleep_time) break + def send_producer_request( + self, + producer_name: str, + request: Union[WSRequestSchema, Dict[str, Any]] + ): + """ + Publish a message to the producer's message stream to be + sent by the channel task. + + :param producer_name: The name of the producer to publish the message to + :param request: The request to send to the producer + """ + if isinstance(request, WSRequestSchema): + request = schema_to_dict(request) + + if channel_stream := self._channel_streams.get(producer_name): + channel_stream.publish(request) + def handle_producer_message(self, producer: Producer, message: Dict[str, Any]): """ Handles external messages from a Producer @@ -340,12 +376,44 @@ class ExternalMessageConsumer: if self._emc_config.get('remove_entry_exit_signals', False): df = remove_entry_exit_signals(df) - # Add the dataframe to the dataprovider - self._dp._add_external_df(pair, df, - last_analyzed=la, - timeframe=timeframe, - candle_type=candle_type, - producer_name=producer_name) + if len(df) >= 999: + # This is a full dataframe + # Add the dataframe to the dataprovider + self._dp._add_external_df( + pair, + df, + last_analyzed=la, + timeframe=timeframe, + candle_type=candle_type, + producer_name=producer_name + ) - logger.debug( + elif len(df) == 1: + # This is just a single candle + # Have dataprovider append it to + # the full datafame. If it can't, + # request the missing candles + if not self._dp._add_external_candle( + pair, + df, + last_analyzed=la, + timeframe=timeframe, + candle_type=candle_type, + producer_name=producer_name + ): + logger.info("Holes in data or no existing df, " + f"requesting data for {key} from `{producer_name}`") + + self.send_producer_request( + producer_name, + WSAnalyzedDFRequest( + data={ + "limit": 1000, + "pair": pair + } + ) + ) + return + + logger.info( f"Consumed message from `{producer_name}` of type `RPCMessageType.ANALYZED_DF`") diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 334e18dc7..8b23d33e7 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -1058,23 +1058,46 @@ class RPC: return self._convert_dataframe_to_dict(self._freqtrade.config['strategy'], pair, timeframe, _data, last_analyzed) - def __rpc_analysed_dataframe_raw(self, pair: str, timeframe: str, - limit: Optional[int]) -> Tuple[DataFrame, datetime]: - """ Get the dataframe and last analyze from the dataprovider """ + def __rpc_analysed_dataframe_raw( + self, + pair: str, + timeframe: str, + limit: Optional[Union[int, List[str]]] = None + ) -> Tuple[DataFrame, datetime]: + """ + Get the dataframe and last analyze from the dataprovider + + :param pair: The pair to get + :param timeframe: The timeframe of data to get + :param limit: If an integer, limits the size of dataframe + If a list of string date times, only returns those candles + """ _data, last_analyzed = self._freqtrade.dataprovider.get_analyzed_dataframe( pair, timeframe) _data = _data.copy() - if limit: + if limit and isinstance(limit, int): _data = _data.iloc[-limit:] + elif limit and isinstance(limit, str): + _data = _data.iloc[_data['date'].isin(limit)] + return _data, last_analyzed def _ws_all_analysed_dataframes( self, pairlist: List[str], - limit: Optional[int] + limit: Optional[Union[int, List[str]]] = None ) -> Generator[Dict[str, Any], None, None]: - """ Get the analysed dataframes of each pair in the pairlist """ + """ + Get the analysed dataframes of each pair in the pairlist. 
+ Limit size of dataframe if specified. + If candles, only return the candles specified. + + :param pairlist: A list of pairs to get + :param limit: If an integer, limits the size of dataframe + If a list of string date times, only returns those candles + :returns: A generator of dictionaries with the key, dataframe, and last analyzed timestamp + """ timeframe = self._freqtrade.config['timeframe'] candle_type = self._freqtrade.config.get('candle_type_def', CandleType.SPOT) @@ -1087,10 +1110,15 @@ class RPC: "la": last_analyzed } - def _ws_request_analyzed_df(self, limit: Optional[int]): + def _ws_request_analyzed_df( + self, + pair: Optional[str], + limit: Optional[Union[int, List[str]]] = None, + ): """ Historical Analyzed Dataframes for WebSocket """ - whitelist = self._freqtrade.active_pair_whitelist - return self._ws_all_analysed_dataframes(whitelist, limit) + pairlist = [pair] if pair else self._freqtrade.active_pair_whitelist + + return self._ws_all_analysed_dataframes(pairlist, limit) def _ws_request_whitelist(self): """ Whitelist data for WebSocket """ From 4cbb3341d7160e21a55b86738100a1f49bfc7a6b Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 19:04:51 -0700 Subject: [PATCH 321/421] change how missing candles will be handled --- freqtrade/data/dataprovider.py | 35 +++++----------------- freqtrade/rpc/external_message_consumer.py | 4 +-- freqtrade/rpc/rpc.py | 13 ++++---- 3 files changed, 15 insertions(+), 37 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index d6eb217a8..07999fc90 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -7,9 +7,9 @@ Common Interface for bot and strategy to access data. import logging from collections import deque from datetime import datetime, timezone -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union -from pandas import DataFrame, concat, date_range +from pandas import DataFrame, concat from freqtrade.configuration import TimeRange from freqtrade.constants import Config, ListPairsWithTimeframes, PairWithTimeframe @@ -165,40 +165,36 @@ class DataProvider: timeframe: str, candle_type: CandleType, producer_name: str = "default" - ) -> Tuple[bool, Optional[List[str]]]: + ) -> Union[bool, int]: """ Append a candle to the existing external dataframe :param pair: pair to get the data for :param timeframe: Timeframe to get data for :param candle_type: Any of the enum CandleType (must match trading mode!) - :returns: A tuple with a boolean value signifying if the candle was correctly appended, - and a list of datetimes missing from the candle if it finds some. - Will return false if has no data for `producer_name`. - Will return false if no existing data for (pair, timeframe, candle_type). - Will return false if there's missing candles, and a list of datetimes of - the missing candles. + :returns: False if the candle could not be appended, or the int number of missing candles. 
""" pair_key = (pair, timeframe, candle_type) if producer_name not in self.__producer_pairs_df: # We don't have data from this producer yet, # so we can't append a candle - return (False, None) + return False if pair_key not in self.__producer_pairs_df[producer_name]: # We don't have data for this pair_key, # so we can't append a candle - return (False, None) + return False # CHECK FOR MISSING CANDLES + # return int existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] appended_df = self._append_candle_to_dataframe(existing_df, dataframe) # Everything is good, we appended self.__producer_pairs_df[producer_name][pair_key] = appended_df, last_analyzed - return (True, None) + return True def _append_candle_to_dataframe(self, existing: DataFrame, new: DataFrame) -> DataFrame: """ @@ -212,25 +208,10 @@ class DataProvider: existing = concat([existing, new]) # Only keep the last 1000 candles in memory - # TODO: Do this better existing = existing[-1000:] if len(existing) > 1000 else existing return existing - def _is_missing_candles(self, dataframe: DataFrame) -> bool: - """ - Check if the dataframe is missing any candles - - :param dataframe: The DataFrame to check - """ - logger.info(dataframe.index) - return len( - date_range( - dataframe.index.min(), - dataframe.index.max() - ).difference(dataframe.index) - ) > 0 - def get_producer_df( self, pair: str, diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 24731ef4f..231642142 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -388,8 +388,8 @@ class ExternalMessageConsumer: producer_name=producer_name ) - elif len(df) == 1: - # This is just a single candle + elif len(df) < 999: + # This is n single candles # Have dataprovider append it to # the full datafame. If it can't, # request the missing candles diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 8b23d33e7..2452a61b8 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -1062,31 +1062,28 @@ class RPC: self, pair: str, timeframe: str, - limit: Optional[Union[int, List[str]]] = None + limit: Optional[int] = None ) -> Tuple[DataFrame, datetime]: """ Get the dataframe and last analyze from the dataprovider :param pair: The pair to get :param timeframe: The timeframe of data to get - :param limit: If an integer, limits the size of dataframe - If a list of string date times, only returns those candles + :param limit: The amount of candles in the dataframe """ _data, last_analyzed = self._freqtrade.dataprovider.get_analyzed_dataframe( pair, timeframe) _data = _data.copy() - if limit and isinstance(limit, int): + if limit: _data = _data.iloc[-limit:] - elif limit and isinstance(limit, str): - _data = _data.iloc[_data['date'].isin(limit)] return _data, last_analyzed def _ws_all_analysed_dataframes( self, pairlist: List[str], - limit: Optional[Union[int, List[str]]] = None + limit: Optional[int] = None ) -> Generator[Dict[str, Any], None, None]: """ Get the analysed dataframes of each pair in the pairlist. 
@@ -1113,7 +1110,7 @@ class RPC: def _ws_request_analyzed_df( self, pair: Optional[str], - limit: Optional[Union[int, List[str]]] = None, + limit: Optional[int] = None, ): """ Historical Analyzed Dataframes for WebSocket """ pairlist = [pair] if pair else self._freqtrade.active_pair_whitelist From 36a00e8de08b47900c5dbaea70c035e51f036571 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 27 Nov 2022 12:17:26 -0700 Subject: [PATCH 322/421] update add_external_candle returns --- freqtrade/data/dataprovider.py | 12 ++++++------ freqtrade/rpc/external_message_consumer.py | 8 +++++--- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 07999fc90..19b5df652 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -7,7 +7,7 @@ Common Interface for bot and strategy to access data. import logging from collections import deque from datetime import datetime, timezone -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple from pandas import DataFrame, concat @@ -165,7 +165,7 @@ class DataProvider: timeframe: str, candle_type: CandleType, producer_name: str = "default" - ) -> Union[bool, int]: + ) -> Tuple[bool, int]: """ Append a candle to the existing external dataframe @@ -179,22 +179,22 @@ class DataProvider: if producer_name not in self.__producer_pairs_df: # We don't have data from this producer yet, # so we can't append a candle - return False + return (False, 0) if pair_key not in self.__producer_pairs_df[producer_name]: # We don't have data for this pair_key, # so we can't append a candle - return False + return (False, 0) # CHECK FOR MISSING CANDLES - # return int + # return (False, int > 0) existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] appended_df = self._append_candle_to_dataframe(existing_df, dataframe) # Everything is good, we appended self.__producer_pairs_df[producer_name][pair_key] = appended_df, last_analyzed - return True + return (True, 0) def _append_candle_to_dataframe(self, existing: DataFrame, new: DataFrame) -> DataFrame: """ diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 231642142..17c4e1aa0 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -393,14 +393,16 @@ class ExternalMessageConsumer: # Have dataprovider append it to # the full datafame. 
If it can't, # request the missing candles - if not self._dp._add_external_candle( + did_append, n_missing = self._dp._add_external_candle( pair, df, last_analyzed=la, timeframe=timeframe, candle_type=candle_type, producer_name=producer_name - ): + ) + + if not did_append: logger.info("Holes in data or no existing df, " f"requesting data for {key} from `{producer_name}`") @@ -408,7 +410,7 @@ class ExternalMessageConsumer: producer_name, WSAnalyzedDFRequest( data={ - "limit": 1000, + "limit": n_missing if n_missing > 0 else 1000, "pair": pair } ) From fce1e9d6d0636c42d1ce19fdc6ebc8acce75e147 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 27 Nov 2022 12:18:41 -0700 Subject: [PATCH 323/421] update analyzed df request to allow specifying a single pair --- freqtrade/rpc/api_server/api_ws.py | 3 ++- freqtrade/rpc/rpc.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index e183cd7e7..18714f15f 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -91,9 +91,10 @@ async def _process_consumer_request( elif type == RPCRequestType.ANALYZED_DF: # Limit the amount of candles per dataframe to 'limit' or 1500 limit = min(data.get('limit', 1500), 1500) if data else None + pair = data.get('pair', None) if data else None # For every pair in the generator, send a separate message - for message in rpc._ws_request_analyzed_df(limit): + for message in rpc._ws_request_analyzed_df(limit, pair): # Format response response = WSAnalyzedDFMessage(data=message) await channel.send(response.dict(exclude_none=True)) diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 2452a61b8..4ebedd6c4 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -1109,8 +1109,8 @@ class RPC: def _ws_request_analyzed_df( self, - pair: Optional[str], limit: Optional[int] = None, + pair: Optional[str] = None ): """ Historical Analyzed Dataframes for WebSocket """ pairlist = [pair] if pair else self._freqtrade.active_pair_whitelist From d2c8487ecf01b90fab34dd55cc8d76bdd9bf5c2d Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 27 Nov 2022 13:11:43 -0700 Subject: [PATCH 324/421] update add_external_candle, fix breaking on ping error, handle empty dataframes --- freqtrade/data/dataprovider.py | 14 +++++++++----- freqtrade/rpc/external_message_consumer.py | 20 ++++++++++++++------ 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 19b5df652..42fe2f603 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -179,15 +179,19 @@ class DataProvider: if producer_name not in self.__producer_pairs_df: # We don't have data from this producer yet, # so we can't append a candle - return (False, 0) + return (False, 999) if pair_key not in self.__producer_pairs_df[producer_name]: # We don't have data for this pair_key, # so we can't append a candle - return (False, 0) + return (False, 999) # CHECK FOR MISSING CANDLES - # return (False, int > 0) + # Calculate difference between last candle in local dataframe + # and first candle in incoming dataframe. Take difference and divide + # by timeframe to find out how many candles we still need. If 1 + # then the incoming candle is the right candle. 
If more than 1, + # return (False, missing candles - 1) existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] appended_df = self._append_candle_to_dataframe(existing_df, dataframe) @@ -207,8 +211,8 @@ class DataProvider: if existing.iloc[-1]['date'] != new.iloc[-1]['date']: existing = concat([existing, new]) - # Only keep the last 1000 candles in memory - existing = existing[-1000:] if len(existing) > 1000 else existing + # Only keep the last 1500 candles in memory + existing = existing[-1500:] if len(existing) > 1000 else existing return existing diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 17c4e1aa0..13c2e5fb3 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -248,7 +248,7 @@ class ExternalMessageConsumer: # Now send any subsequent requests published to # this channel's stream - async for request in channel_stream: + async for request, _ in channel_stream: logger.info(f"Sending request to channel - {channel} - {request}") await channel.send(request) @@ -292,13 +292,13 @@ class ExternalMessageConsumer: except (websockets.exceptions.ConnectionClosed): # Just eat the error and continue reconnecting logger.warning(f"Disconnection in {channel} - retrying in {self.sleep_time}s") + await asyncio.sleep(self.sleep_time) + break except Exception as e: # Just eat the error and continue reconnecting logger.warning(f"Ping error {channel} - {e} - retrying in {self.sleep_time}s") logger.debug(e, exc_info=e) - - finally: await asyncio.sleep(self.sleep_time) break @@ -372,10 +372,16 @@ class ExternalMessageConsumer: pair, timeframe, candle_type = key + if df.empty: + logger.info(f"Received Empty Dataframe for {key}") + return + # If set, remove the Entry and Exit signals from the Producer if self._emc_config.get('remove_entry_exit_signals', False): df = remove_entry_exit_signals(df) + logger.info(f"Received {len(df)} candle(s) for {key}") + if len(df) >= 999: # This is a full dataframe # Add the dataframe to the dataprovider @@ -404,13 +410,14 @@ class ExternalMessageConsumer: if not did_append: logger.info("Holes in data or no existing df, " - f"requesting data for {key} from `{producer_name}`") + f"requesting {n_missing} candles " + f"for {key} from `{producer_name}`") self.send_producer_request( producer_name, WSAnalyzedDFRequest( data={ - "limit": n_missing if n_missing > 0 else 1000, + "limit": n_missing, "pair": pair } ) @@ -418,4 +425,5 @@ class ExternalMessageConsumer: return logger.info( - f"Consumed message from `{producer_name}` of type `RPCMessageType.ANALYZED_DF`") + f"Consumed message from `{producer_name}` " + f"of type `RPCMessageType.ANALYZED_DF` for {key}") From 89338fa677185b70528d2f74609ced74f84f7274 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 27 Nov 2022 13:14:49 -0700 Subject: [PATCH 325/421] allow specifying channel send throttle --- freqtrade/rpc/api_server/ws/channel.py | 7 +++++-- freqtrade/rpc/external_message_consumer.py | 6 +++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index c50aff8be..3c0a833d8 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -27,7 +27,8 @@ class WebSocketChannel: self, websocket: WebSocketType, channel_id: Optional[str] = None, - serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer + serializer_cls: Type[WebSocketSerializer] = 
HybridJSONWebSocketSerializer, + send_throttle: float = 0.01 ): self.channel_id = channel_id if channel_id else uuid4().hex[:8] self._websocket = WebSocketProxy(websocket) @@ -41,6 +42,7 @@ class WebSocketChannel: self._send_times: Deque[float] = deque([], maxlen=10) # High limit defaults to 3 to start self._send_high_limit = 3 + self._send_throttle = send_throttle # The subscribed message types self._subscriptions: List[str] = [] @@ -106,7 +108,8 @@ class WebSocketChannel: # Explicitly give control back to event loop as # websockets.send does not - await asyncio.sleep(0.01) + # Also throttles how fast we send + await asyncio.sleep(self._send_throttle) async def recv(self): """ diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 13c2e5fb3..aed5d9fb9 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -202,7 +202,11 @@ class ExternalMessageConsumer: max_size=self.message_size_limit, ping_interval=None ) as ws: - async with create_channel(ws, channel_id=name) as channel: + async with create_channel( + ws, + channel_id=name, + send_throttle=0.5 + ) as channel: # Create the message stream for this channel self._channel_streams[name] = MessageStream() From c050eb8b8b372c280b43ea0c2eecbe683ef083d9 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 28 Nov 2022 11:02:03 -0700 Subject: [PATCH 326/421] add candle difference calculation to dataprovider --- freqtrade/data/dataprovider.py | 40 +++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 42fe2f603..e34a428eb 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -9,7 +9,7 @@ from collections import deque from datetime import datetime, timezone from typing import Any, Dict, List, Optional, Tuple -from pandas import DataFrame, concat +from pandas import DataFrame, concat, to_timedelta from freqtrade.configuration import TimeRange from freqtrade.constants import Config, ListPairsWithTimeframes, PairWithTimeframe @@ -176,24 +176,30 @@ class DataProvider: """ pair_key = (pair, timeframe, candle_type) - if producer_name not in self.__producer_pairs_df: + if (producer_name not in self.__producer_pairs_df) \ + or (pair_key not in self.__producer_pairs_df[producer_name]): # We don't have data from this producer yet, - # so we can't append a candle - return (False, 999) - - if pair_key not in self.__producer_pairs_df[producer_name]: - # We don't have data for this pair_key, - # so we can't append a candle - return (False, 999) - - # CHECK FOR MISSING CANDLES - # Calculate difference between last candle in local dataframe - # and first candle in incoming dataframe. Take difference and divide - # by timeframe to find out how many candles we still need. If 1 - # then the incoming candle is the right candle. 
If more than 1, - # return (False, missing candles - 1) + # sor we don't have data for this pair_key + # return False and 1000 for the full df + return (False, 1000) existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] + + # CHECK FOR MISSING CANDLES + timeframe_delta = to_timedelta(timeframe) # Convert the timeframe to a timedelta for pandas + local_last = existing_df.iloc[-1]['date'] # We want the last date from our copy of data + incoming_first = dataframe.iloc[0]['date'] # We want the first date from the incoming data + + candle_difference = (incoming_first - local_last) / timeframe_delta + + # If the difference divided by the timeframe is 1, then this + # is the candle we want and the incoming data isn't missing any. + # If the candle_difference is more than 1, that means + # we missed some candles between our data and the incoming + # so return False and candle_difference. + if candle_difference > 1: + return (False, candle_difference) + appended_df = self._append_candle_to_dataframe(existing_df, dataframe) # Everything is good, we appended @@ -212,7 +218,7 @@ class DataProvider: existing = concat([existing, new]) # Only keep the last 1500 candles in memory - existing = existing[-1500:] if len(existing) > 1000 else existing + existing = existing[-1500:] if len(existing) > 1500 else existing return existing From ccd1aa70a2f5b1ecfcc202e20250b2d79a11a6cc Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 29 Nov 2022 11:21:36 -0700 Subject: [PATCH 327/421] change log calls to debug, handle already received candle --- freqtrade/data/dataprovider.py | 9 ++++++++- freqtrade/rpc/external_message_consumer.py | 14 +++++++------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index e34a428eb..657d96df1 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -179,7 +179,7 @@ class DataProvider: if (producer_name not in self.__producer_pairs_df) \ or (pair_key not in self.__producer_pairs_df[producer_name]): # We don't have data from this producer yet, - # sor we don't have data for this pair_key + # or we don't have data for this pair_key # return False and 1000 for the full df return (False, 1000) @@ -190,6 +190,13 @@ class DataProvider: local_last = existing_df.iloc[-1]['date'] # We want the last date from our copy of data incoming_first = dataframe.iloc[0]['date'] # We want the first date from the incoming data + # We have received this candle before, update our copy + # and return True, 0 + if local_last == incoming_first: + existing_df.iloc[-1] = dataframe.iloc[0] + existing_df = existing_df.reset_index(drop=True) + return (True, 0) + candle_difference = (incoming_first - local_last) / timeframe_delta # If the difference divided by the timeframe is 1, then this diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index aed5d9fb9..d028bc006 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -253,7 +253,7 @@ class ExternalMessageConsumer: # Now send any subsequent requests published to # this channel's stream async for request, _ in channel_stream: - logger.info(f"Sending request to channel - {channel} - {request}") + logger.debug(f"Sending request to channel - {channel} - {request}") await channel.send(request) async def _receive_messages( @@ -377,14 +377,14 @@ class ExternalMessageConsumer: pair, timeframe, candle_type = key if df.empty: - logger.info(f"Received 
Empty Dataframe for {key}") + logger.debug(f"Received Empty Dataframe for {key}") return # If set, remove the Entry and Exit signals from the Producer if self._emc_config.get('remove_entry_exit_signals', False): df = remove_entry_exit_signals(df) - logger.info(f"Received {len(df)} candle(s) for {key}") + logger.debug(f"Received {len(df)} candle(s) for {key}") if len(df) >= 999: # This is a full dataframe @@ -413,9 +413,9 @@ class ExternalMessageConsumer: ) if not did_append: - logger.info("Holes in data or no existing df, " - f"requesting {n_missing} candles " - f"for {key} from `{producer_name}`") + logger.debug("Holes in data or no existing df, " + f"requesting {n_missing} candles " + f"for {key} from `{producer_name}`") self.send_producer_request( producer_name, @@ -428,6 +428,6 @@ class ExternalMessageConsumer: ) return - logger.info( + logger.debug( f"Consumed message from `{producer_name}` " f"of type `RPCMessageType.ANALYZED_DF` for {key}") From d376bf4052f56dcceedf2d30121a1419a7369702 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 29 Nov 2022 12:22:06 -0700 Subject: [PATCH 328/421] fix indefinite reconnecting --- freqtrade/rpc/external_message_consumer.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index d028bc006..05effb783 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -293,18 +293,11 @@ class ExternalMessageConsumer: logger.info(f"Connection to {channel} still alive, latency: {latency}ms") continue - except (websockets.exceptions.ConnectionClosed): - # Just eat the error and continue reconnecting - logger.warning(f"Disconnection in {channel} - retrying in {self.sleep_time}s") - await asyncio.sleep(self.sleep_time) - break - except Exception as e: # Just eat the error and continue reconnecting logger.warning(f"Ping error {channel} - {e} - retrying in {self.sleep_time}s") logger.debug(e, exc_info=e) - await asyncio.sleep(self.sleep_time) - break + raise def send_producer_request( self, From 0d5b2eed942922bffae0676d7870f2487f18ccec Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 2 Dec 2022 12:07:48 -0700 Subject: [PATCH 329/421] fix same candle handling --- freqtrade/data/dataprovider.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 657d96df1..78d73b07d 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -194,7 +194,9 @@ class DataProvider: # and return True, 0 if local_last == incoming_first: existing_df.iloc[-1] = dataframe.iloc[0] - existing_df = existing_df.reset_index(drop=True) + existing_data = (existing_df.reset_index(drop=True), _) + + self.__producer_pairs_df[producer_name][pair_key] = existing_data return (True, 0) candle_difference = (incoming_first - local_last) / timeframe_delta From 49f6f40662d46bdfc2ca5006c96577df3db593b1 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 2 Dec 2022 12:08:42 -0700 Subject: [PATCH 330/421] remove comment --- freqtrade/rpc/external_message_consumer.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 05effb783..15312ba10 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -43,10 +43,6 @@ def schema_to_dict(schema: Union[WSMessageSchema, WSRequestSchema]): return 
schema.dict(exclude_none=True) -# def parse_message(message: Dict[str, Any], message_schema: Type[WSMessageSchema]): -# return message_schema.parse_obj(message) - - class ExternalMessageConsumer: """ The main controller class for consuming external messages from From f1ebaf4730606498d928f3f02ab5fcddfe87310d Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 2 Dec 2022 12:28:27 -0700 Subject: [PATCH 331/421] fix tests --- freqtrade/rpc/external_message_consumer.py | 7 ++++--- tests/rpc/test_rpc_emc.py | 14 ++++++++------ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 15312ba10..743698b24 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -224,20 +224,21 @@ class ExternalMessageConsumer: websockets.exceptions.InvalidMessage ) as e: logger.error(f"Connection Refused - {e} retrying in {self.sleep_time}s") + await asyncio.sleep(self.sleep_time) + continue except ( websockets.exceptions.ConnectionClosedError, websockets.exceptions.ConnectionClosedOK ): # Just keep trying to connect again indefinitely - pass + await asyncio.sleep(self.sleep_time) + continue except Exception as e: # An unforseen error has occurred, log and continue logger.error("Unexpected error has occurred:") logger.exception(e) - - finally: await asyncio.sleep(self.sleep_time) continue diff --git a/tests/rpc/test_rpc_emc.py b/tests/rpc/test_rpc_emc.py index 93ae829d5..155239e94 100644 --- a/tests/rpc/test_rpc_emc.py +++ b/tests/rpc/test_rpc_emc.py @@ -94,7 +94,7 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): assert log_has( f"Consumed message from `{producer_name}` of type `RPCMessageType.WHITELIST`", caplog) - # Test handle analyzed_df message + # Test handle analyzed_df single candle message df_message = { "type": "analyzed_df", "data": { @@ -106,8 +106,7 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): patched_emc.handle_producer_message(test_producer, df_message) assert log_has(f"Received message of type `analyzed_df` from `{producer_name}`", caplog) - assert log_has( - f"Consumed message from `{producer_name}` of type `RPCMessageType.ANALYZED_DF`", caplog) + assert log_has_re(r"Holes in data or no existing df,.+", caplog) # Test unhandled message unhandled_message = {"type": "status", "data": "RUNNING"} @@ -183,7 +182,7 @@ async def test_emc_create_connection_success(default_conf, caplog, mocker): async with websockets.serve(eat, _TEST_WS_HOST, _TEST_WS_PORT): await emc._create_connection(test_producer, lock) - assert log_has_re(r"Producer connection success.+", caplog) + assert log_has_re(r"Connected to channel.+", caplog) finally: emc.shutdown() @@ -212,7 +211,8 @@ async def test_emc_create_connection_invalid_url(default_conf, caplog, mocker, h dp = DataProvider(default_conf, None, None, None) # Handle start explicitly to avoid messing with threading in tests - mocker.patch("freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start",) + mocker.patch("freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start") + mocker.patch("freqtrade.rpc.api_server.ws.channel.create_channel") emc = ExternalMessageConsumer(default_conf, dp) try: @@ -390,7 +390,9 @@ async def test_emc_receive_messages_timeout(default_conf, caplog, mocker): try: change_running(emc) loop.call_soon(functools.partial(change_running, emc=emc)) - await emc._receive_messages(TestChannel(), test_producer, lock) + 
+ with pytest.raises(asyncio.TimeoutError): + await emc._receive_messages(TestChannel(), test_producer, lock) assert log_has_re(r"Ping error.+", caplog) finally: From 0602479f7d328094401ebe454fd4d33962b09a19 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 5 Dec 2022 13:11:07 -0700 Subject: [PATCH 332/421] minor changes, update candle appending to support overlaps --- freqtrade/data/dataprovider.py | 31 +++++++++++++--------- freqtrade/rpc/external_message_consumer.py | 22 ++++++++++----- freqtrade/rpc/rpc.py | 4 +-- 3 files changed, 35 insertions(+), 22 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 78d73b07d..b889da17f 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -167,7 +167,8 @@ class DataProvider: producer_name: str = "default" ) -> Tuple[bool, int]: """ - Append a candle to the existing external dataframe + Append a candle to the existing external dataframe. The incoming dataframe + must have at least 1 candle. :param pair: pair to get the data for :param timeframe: Timeframe to get data for @@ -176,29 +177,32 @@ class DataProvider: """ pair_key = (pair, timeframe, candle_type) - if (producer_name not in self.__producer_pairs_df) \ - or (pair_key not in self.__producer_pairs_df[producer_name]): + if dataframe.empty: + # The incoming dataframe must have at least 1 candle + return (False, 0) + + if (producer_name not in self.__producer_pairs_df + or pair_key not in self.__producer_pairs_df[producer_name]): # We don't have data from this producer yet, # or we don't have data for this pair_key # return False and 1000 for the full df return (False, 1000) - existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] + existing_df, la = self.__producer_pairs_df[producer_name][pair_key] + + # Iterate over any overlapping candles and update the values + for idx, candle in dataframe.iterrows(): + existing_df.iloc[ + existing_df['date'] == candle['date'] + ] = candle + + existing_df.reset_index(drop=True, inplace=True) # CHECK FOR MISSING CANDLES timeframe_delta = to_timedelta(timeframe) # Convert the timeframe to a timedelta for pandas local_last = existing_df.iloc[-1]['date'] # We want the last date from our copy of data incoming_first = dataframe.iloc[0]['date'] # We want the first date from the incoming data - # We have received this candle before, update our copy - # and return True, 0 - if local_last == incoming_first: - existing_df.iloc[-1] = dataframe.iloc[0] - existing_data = (existing_df.reset_index(drop=True), _) - - self.__producer_pairs_df[producer_name][pair_key] = existing_data - return (True, 0) - candle_difference = (incoming_first - local_last) / timeframe_delta # If the difference divided by the timeframe is 1, then this @@ -228,6 +232,7 @@ class DataProvider: # Only keep the last 1500 candles in memory existing = existing[-1500:] if len(existing) > 1500 else existing + existing.reset_index(drop=True, inplace=True) return existing diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 743698b24..278f04a8e 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -36,6 +36,9 @@ class Producer(TypedDict): ws_token: str +FULL_DATAFRAME_THRESHOLD = 100 + + logger = logging.getLogger(__name__) @@ -376,8 +379,8 @@ class ExternalMessageConsumer: logger.debug(f"Received {len(df)} candle(s) for {key}") - if len(df) >= 999: - # This is a full dataframe + if len(df) >= 
FULL_DATAFRAME_THRESHOLD: + # This is likely a full dataframe # Add the dataframe to the dataprovider self._dp._add_external_df( pair, @@ -388,8 +391,8 @@ class ExternalMessageConsumer: producer_name=producer_name ) - elif len(df) < 999: - # This is n single candles + elif len(df) < FULL_DATAFRAME_THRESHOLD: + # This is likely n single candles # Have dataprovider append it to # the full datafame. If it can't, # request the missing candles @@ -403,9 +406,14 @@ class ExternalMessageConsumer: ) if not did_append: - logger.debug("Holes in data or no existing df, " - f"requesting {n_missing} candles " - f"for {key} from `{producer_name}`") + # We want an overlap in candles incase some data has changed + n_missing += 1 + # Set to None for all candles if we missed a full df's worth of candles + n_missing = n_missing if n_missing < FULL_DATAFRAME_THRESHOLD else 1500 + + logger.warning("Holes in data or no existing df, " + f"requesting {n_missing} candles " + f"for {key} from `{producer_name}`") self.send_producer_request( producer_name, diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 4ebedd6c4..331569de3 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -1062,7 +1062,7 @@ class RPC: self, pair: str, timeframe: str, - limit: Optional[int] = None + limit: Optional[int] ) -> Tuple[DataFrame, datetime]: """ Get the dataframe and last analyze from the dataprovider @@ -1083,7 +1083,7 @@ class RPC: def _ws_all_analysed_dataframes( self, pairlist: List[str], - limit: Optional[int] = None + limit: Optional[int] ) -> Generator[Dict[str, Any], None, None]: """ Get the analysed dataframes of each pair in the pairlist. From 6717dff19bb75015ff8ad8624fa0a82d3a961952 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 6 Dec 2022 16:00:28 -0700 Subject: [PATCH 333/421] update overlapping candle handling, move append to misc --- freqtrade/data/dataprovider.py | 48 ++++++++++++++++------------------ freqtrade/misc.py | 18 +++++++++++++ 2 files changed, 40 insertions(+), 26 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index b889da17f..8d81221b6 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -17,6 +17,7 @@ from freqtrade.data.history import load_pair_history from freqtrade.enums import CandleType, RPCMessageType, RunMode from freqtrade.exceptions import ExchangeError, OperationalException from freqtrade.exchange import Exchange, timeframe_to_seconds +from freqtrade.misc import append_candles_to_dataframe from freqtrade.rpc import RPCManager from freqtrade.util import PeriodicCache @@ -190,18 +191,30 @@ class DataProvider: existing_df, la = self.__producer_pairs_df[producer_name][pair_key] - # Iterate over any overlapping candles and update the values - for idx, candle in dataframe.iterrows(): - existing_df.iloc[ - existing_df['date'] == candle['date'] - ] = candle + # Handle overlapping candles + old_candles = existing_df[ + ~existing_df['date'].isin( + dataframe['date'] + ) + ] + overlapping_candles = existing_df[ + existing_df['date'].isin( + dataframe['date'] + ) + ] + new_candles = dataframe[ + ~dataframe['date'].isin( + existing_df['date'] + ) + ] - existing_df.reset_index(drop=True, inplace=True) + if overlapping_candles: + existing_df = concat([old_candles, overlapping_candles], axis=0) # CHECK FOR MISSING CANDLES timeframe_delta = to_timedelta(timeframe) # Convert the timeframe to a timedelta for pandas - local_last = existing_df.iloc[-1]['date'] # We want the last date from our copy of data - 
incoming_first = dataframe.iloc[0]['date'] # We want the first date from the incoming data + local_last = existing_df.iloc[-1]['date'] # We want the last date from our copy + incoming_first = new_candles.iloc[0]['date'] # We want the first date from the incoming candle_difference = (incoming_first - local_last) / timeframe_delta @@ -213,29 +226,12 @@ class DataProvider: if candle_difference > 1: return (False, candle_difference) - appended_df = self._append_candle_to_dataframe(existing_df, dataframe) + appended_df = append_candles_to_dataframe(existing_df, dataframe) # Everything is good, we appended self.__producer_pairs_df[producer_name][pair_key] = appended_df, last_analyzed return (True, 0) - def _append_candle_to_dataframe(self, existing: DataFrame, new: DataFrame) -> DataFrame: - """ - Append the `new` dataframe to the `existing` dataframe - - :param existing: The full dataframe you want appended to - :param new: The new dataframe containing the data you want appended - :returns: The dataframe with the new data in it - """ - if existing.iloc[-1]['date'] != new.iloc[-1]['date']: - existing = concat([existing, new]) - - # Only keep the last 1500 candles in memory - existing = existing[-1500:] if len(existing) > 1500 else existing - existing.reset_index(drop=True, inplace=True) - - return existing - def get_producer_df( self, pair: str, diff --git a/freqtrade/misc.py b/freqtrade/misc.py index 2d2c7513a..93e8da6dd 100644 --- a/freqtrade/misc.py +++ b/freqtrade/misc.py @@ -301,3 +301,21 @@ def remove_entry_exit_signals(dataframe: pd.DataFrame): dataframe[SignalTagType.EXIT_TAG.value] = None return dataframe + + +def append_candles_to_dataframe(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame: + """ + Append the `right` dataframe to the `left` dataframe + + :param left: The full dataframe you want appended to + :param right: The new dataframe containing the data you want appended + :returns: The dataframe with the right data in it + """ + if left.iloc[-1]['date'] != right.iloc[-1]['date']: + left = pd.concat([left, right]) + + # Only keep the last 1500 candles in memory + left = left[-1500:] if len(left) > 1500 else left + left.reset_index(drop=True, inplace=True) + + return left From 414c0ce050e520855a6440176b89e4c76797a6e1 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 6 Dec 2022 16:02:28 -0700 Subject: [PATCH 334/421] change unused var --- freqtrade/data/dataprovider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 8d81221b6..3a6f74b97 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -189,7 +189,7 @@ class DataProvider: # return False and 1000 for the full df return (False, 1000) - existing_df, la = self.__producer_pairs_df[producer_name][pair_key] + existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] # Handle overlapping candles old_candles = existing_df[ From 96edd31458e20237d65f98642c198b1cb13f8c4b Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 10 Dec 2022 20:03:42 +0100 Subject: [PATCH 335/421] Test add_external_candle --- tests/data/test_dataprovider.py | 68 ++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 2 deletions(-) diff --git a/tests/data/test_dataprovider.py b/tests/data/test_dataprovider.py index 025e6d08a..862abfa0b 100644 --- a/tests/data/test_dataprovider.py +++ b/tests/data/test_dataprovider.py @@ -2,13 +2,13 @@ from datetime import datetime, timezone from unittest.mock import MagicMock import 
pytest -from pandas import DataFrame +from pandas import DataFrame, Timestamp from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import CandleType, RunMode from freqtrade.exceptions import ExchangeError, OperationalException from freqtrade.plugins.pairlistmanager import PairListManager -from tests.conftest import get_patched_exchange +from tests.conftest import generate_test_data, get_patched_exchange @pytest.mark.parametrize('candle_type', [ @@ -412,3 +412,67 @@ def test_dp_send_msg(default_conf): dp = DataProvider(default_conf, None) dp.send_msg(msg, always_send=True) assert msg not in dp._msg_queue + + +def test_dp__add_external_candle(default_conf_usdt): + timeframe = '1h' + default_conf_usdt["timeframe"] = timeframe + dp = DataProvider(default_conf_usdt, None) + df = generate_test_data(timeframe, 24, '2022-01-01 00:00:00+00:00') + last_analyzed = datetime.now(timezone.utc) + + res = dp._add_external_candle('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + assert res[0] is False + # Why 1000 ?? + assert res[1] == 1000 + + dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + # BTC is not stored yet + res = dp._add_external_candle('BTC/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + assert res[0] is False + df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + assert len(df) == 24 + + # Add the same dataframe again - dataframe size shall not change. + res = dp._add_external_candle('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + assert res[0] is True + assert res[1] == 0 + df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + assert len(df) == 24 + + # Add a new day. + df2 = generate_test_data(timeframe, 24, '2022-01-02 00:00:00+00:00') + + res = dp._add_external_candle('ETH/USDT', df2, last_analyzed, timeframe, CandleType.SPOT) + assert res[0] is True + assert res[1] == 0 + df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + assert len(df) == 48 + + # Add a dataframe with a 12 hour offset - so 12 candles are overlapping, and 12 valid. + df3 = generate_test_data(timeframe, 24, '2022-01-02 12:00:00+00:00') + + res = dp._add_external_candle('ETH/USDT', df3, last_analyzed, timeframe, CandleType.SPOT) + assert res[0] is True + assert res[1] == 0 + df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + # New length = 48 + 12 (since we have a 12 hour offset). + assert len(df) == 60 + assert df.iloc[-1]['date'] == df3.iloc[-1]['date'] + assert df.iloc[-1]['date'] == Timestamp('2022-01-03 11:00:00+00:00') + + # Generate 1 new candle + df4 = generate_test_data(timeframe, 1, '2022-01-03 12:00:00+00:00') + res = dp._add_external_candle('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + # assert res[0] is True + # assert res[1] == 0 + df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + # New length = 61 + 1 + assert len(df) == 61 + + # Gap in the data ... 
+ df4 = generate_test_data(timeframe, 1, '2022-01-05 00:00:00+00:00') + res = dp._add_external_candle('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + assert res[0] is False + # 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00 + assert res[1] == 36 From a693495a6d599fd7bdbec75337db3c44dc39c5b7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 11 Dec 2022 08:42:13 +0100 Subject: [PATCH 336/421] Improve external_candle aggregation --- freqtrade/data/dataprovider.py | 33 +++++++++------------------------ 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 3a6f74b97..10569e7c7 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -9,7 +9,7 @@ from collections import deque from datetime import datetime, timezone from typing import Any, Dict, List, Optional, Tuple -from pandas import DataFrame, concat, to_timedelta +from pandas import DataFrame, to_timedelta from freqtrade.configuration import TimeRange from freqtrade.constants import Config, ListPairsWithTimeframes, PairWithTimeframe @@ -191,30 +191,13 @@ class DataProvider: existing_df, _ = self.__producer_pairs_df[producer_name][pair_key] - # Handle overlapping candles - old_candles = existing_df[ - ~existing_df['date'].isin( - dataframe['date'] - ) - ] - overlapping_candles = existing_df[ - existing_df['date'].isin( - dataframe['date'] - ) - ] - new_candles = dataframe[ - ~dataframe['date'].isin( - existing_df['date'] - ) - ] - - if overlapping_candles: - existing_df = concat([old_candles, overlapping_candles], axis=0) - # CHECK FOR MISSING CANDLES timeframe_delta = to_timedelta(timeframe) # Convert the timeframe to a timedelta for pandas local_last = existing_df.iloc[-1]['date'] # We want the last date from our copy - incoming_first = new_candles.iloc[0]['date'] # We want the first date from the incoming + incoming_first = dataframe.iloc[0]['date'] # We want the first date from the incoming + + # Remove existing candles that are newer than the incoming first candle + existing_df1 = existing_df[existing_df['date'] < incoming_first] candle_difference = (incoming_first - local_last) / timeframe_delta @@ -225,8 +208,10 @@ class DataProvider: # so return False and candle_difference. if candle_difference > 1: return (False, candle_difference) - - appended_df = append_candles_to_dataframe(existing_df, dataframe) + if existing_df1.empty: + appended_df = dataframe + else: + appended_df = append_candles_to_dataframe(existing_df1, dataframe) # Everything is good, we appended self.__producer_pairs_df[producer_name][pair_key] = appended_df, last_analyzed From 1c0c4fd4206bcafc59ad70a5bb5890cf657a928d Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 11 Dec 2022 08:49:35 +0100 Subject: [PATCH 337/421] Improve test --- tests/data/test_dataprovider.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/data/test_dataprovider.py b/tests/data/test_dataprovider.py index 862abfa0b..cce483c07 100644 --- a/tests/data/test_dataprovider.py +++ b/tests/data/test_dataprovider.py @@ -469,6 +469,8 @@ def test_dp__add_external_candle(default_conf_usdt): df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) # New length = 61 + 1 assert len(df) == 61 + assert df.iloc[-2]['date'] == Timestamp('2022-01-03 11:00:00+00:00') + assert df.iloc[-1]['date'] == Timestamp('2022-01-03 12:00:00+00:00') # Gap in the data ... 
df4 = generate_test_data(timeframe, 1, '2022-01-05 00:00:00+00:00') @@ -476,3 +478,13 @@ def test_dp__add_external_candle(default_conf_usdt): assert res[0] is False # 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00 assert res[1] == 36 + df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + # New length = 61 + 1 + assert len(df) == 61 + + # Empty dataframe + df4 = generate_test_data(timeframe, 0, '2022-01-05 00:00:00+00:00') + res = dp._add_external_candle('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + assert res[0] is False + # 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00 + assert res[1] == 0 From 0dd3836cc7a6c3ac8b5863b8267db889c7666d14 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 12 Dec 2022 22:46:19 -0700 Subject: [PATCH 338/421] fix rpc method docstring --- freqtrade/rpc/rpc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 331569de3..ceb791b46 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -1087,8 +1087,8 @@ class RPC: ) -> Generator[Dict[str, Any], None, None]: """ Get the analysed dataframes of each pair in the pairlist. - Limit size of dataframe if specified. - If candles, only return the candles specified. + If specified, only return the most recent `limit` candles for + each dataframe. :param pairlist: A list of pairs to get :param limit: If an integer, limits the size of dataframe From c042d0146e29baee22b42487b9bdded223754b88 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 13 Dec 2022 17:14:11 +0000 Subject: [PATCH 339/421] Don't run gc_setup during tests --- tests/conftest.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index f3fc908e7..c9af5a171 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -408,6 +408,11 @@ def create_mock_trades_usdt(fee, is_short: Optional[bool] = False, use_db: bool Trade.commit() +@pytest.fixture(autouse=True) +def patch_gc(mocker) -> None: + mocker.patch("freqtrade.main.gc_set_threshold") + + @pytest.fixture(autouse=True) def patch_coingekko(mocker) -> None: """ From fed46d330ff4b8c2ed9aff97b311148f746bb99d Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 13 Dec 2022 18:14:56 +0100 Subject: [PATCH 340/421] Revert "Bump scikit-learn from 1.1.3 to 1.2.0" --- requirements-freqai.txt | 2 +- requirements-hyperopt.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 57dd8dbb4..215a312bf 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -3,7 +3,7 @@ -r requirements-plot.txt # Required for freqai -scikit-learn==1.2.0 +scikit-learn==1.1.3 joblib==1.2.0 catboost==1.1.1; platform_machine != 'aarch64' lightgbm==3.3.3 diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index 8fc58812b..fcae2cbdd 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -3,7 +3,7 @@ # Required for hyperopt scipy==1.9.3 -scikit-learn==1.2.0 +scikit-learn==1.1.3 scikit-optimize==0.9.0 filelock==3.8.2 progressbar2==4.2.0 From 1d92db7805c1f13bafd61177a9f451e1b612751f Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 13 Dec 2022 19:23:37 +0100 Subject: [PATCH 341/421] Change CI to actually run one 2 randomized point. 
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b15451a64..0a787bc47 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -88,7 +88,7 @@ jobs: run: | cp config_examples/config_bittrex.example.json config.json freqtrade create-userdir --userdir user_data - freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all + freqtrade hyperopt --datadir tests/testdata -e 6 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all - name: Flake8 run: | From d52c1c75544aee98f06be81cbce74d2fb45500b5 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Tue, 13 Dec 2022 20:21:06 +0100 Subject: [PATCH 342/421] Add unit tests --- docs/includes/pairlists.md | 2 +- freqtrade/plugins/pairlist/RemotePairList.py | 29 ++--- tests/plugins/test_remotepairlist.py | 123 +++++++++++++++++++ 3 files changed, 139 insertions(+), 15 deletions(-) create mode 100644 tests/plugins/test_remotepairlist.py diff --git a/docs/includes/pairlists.md b/docs/includes/pairlists.md index c12683e75..3a6ab7a3c 100644 --- a/docs/includes/pairlists.md +++ b/docs/includes/pairlists.md @@ -2,7 +2,7 @@ Pairlist Handlers define the list of pairs (pairlist) that the bot should trade. They are configured in the `pairlists` section of the configuration settings. -In your configuration, you can use Static Pairlist (defined by the [`StaticPairList`](#static-pair-list) Pairlist Handler), Dynamic Pairlist (defined by the [`VolumePairList`](#volume-pair-list) Pairlist Handler). +In your configuration, you can use Static Pairlist (defined by the [`StaticPairList`](#static-pair-list) Pairlist Handler) and Dynamic Pairlist (defined by the [`VolumePairList`](#volume-pair-list) Pairlist Handler). Additionally, [`AgeFilter`](#agefilter), [`PrecisionFilter`](#precisionfilter), [`PriceFilter`](#pricefilter), [`ShuffleFilter`](#shufflefilter), [`SpreadFilter`](#spreadfilter) and [`VolatilityFilter`](#volatilityfilter) act as Pairlist Filters, removing certain pairs and/or moving their positions in the pairlist. 
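The `RemotePairList` handler touched in the diff below fetches such a pairlist from a remote JSON endpoint. As a rough illustration only — the URL and the `parse_pairlist` helper are placeholders, not freqtrade's implementation — the configuration entry used in the new tests and the payload shape the handler expects look roughly like this:

```python
import json

# Hypothetical "pairlists" entry, mirroring the fixture used in the new tests below.
pairlist_config = {
    "method": "RemotePairList",
    "pairlist_url": "http://example.com/pairlist",  # placeholder URL
    "number_assets": 10,
    "refresh_period": 10,
    "read_timeout": 10,
    "keep_pairlist_on_failure": True,
}

# Shape of the JSON payload the remote endpoint is expected to serve.
payload = {
    "pairs": ["ETH/BTC", "XRP/BTC", "LTC/BTC", "EOS/BTC"],
    "info": "Mock pairlist response",
    "refresh_period": 60,
}


def parse_pairlist(raw: str, fallback_refresh: int):
    """Minimal sketch of parsing such a payload (not the freqtrade implementation)."""
    data = json.loads(raw)
    pairs = data["pairs"]
    info = data.get("info", "")
    refresh_period = data.get("refresh_period", fallback_refresh)
    return pairs, info, refresh_period


print(parse_pairlist(json.dumps(payload), pairlist_config["refresh_period"]))
```

The actual handler additionally caches the fetched pairs in a `TTLCache` keyed by the returned `refresh_period`, as shown in the diff that follows.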
diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 7ef038da7..418ac5b0b 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -74,21 +74,22 @@ class RemotePairList(IPairList): info = "Pairlist" try: - with requests.get(self._pairlist_url, headers=headers, - timeout=self._read_timeout) as response: - content_type = response.headers.get('content-type') - time_elapsed = response.elapsed.total_seconds() + response = requests.get(self._pairlist_url, headers=headers, + timeout=self._read_timeout) + content_type = response.headers.get('content-type') + time_elapsed = response.elapsed.total_seconds() - if "application/json" in str(content_type): - jsonparse = response.json() - pairlist = jsonparse['pairs'] - info = jsonparse.get('info', '')[:1000] - else: - raise OperationalException( - 'Remotepairlist is not of type JSON abort') + print(response) - self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) - self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + if "application/json" in str(content_type): + jsonparse = response.json() + pairlist = jsonparse['pairs'] + info = jsonparse.get('info', '') + else: + raise OperationalException('RemotePairList is not of type JSON abort ') + + self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) + self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) except requests.exceptions.RequestException: self.log_once(f'Was not able to fetch pairlist from:' @@ -127,7 +128,7 @@ class RemotePairList(IPairList): # Load the JSON data into a dictionary jsonparse = json.load(json_file) pairlist = jsonparse['pairs'] - info = jsonparse.get('info', '')[:1000] + info = jsonparse.get('info', '') self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) diff --git a/tests/plugins/test_remotepairlist.py b/tests/plugins/test_remotepairlist.py new file mode 100644 index 000000000..743534bc3 --- /dev/null +++ b/tests/plugins/test_remotepairlist.py @@ -0,0 +1,123 @@ +from unittest.mock import MagicMock + +import pytest + +from freqtrade.exceptions import OperationalException +from freqtrade.plugins.pairlist.RemotePairList import RemotePairList +from freqtrade.plugins.pairlistmanager import PairListManager +from tests.conftest import get_patched_exchange, get_patched_freqtradebot + + +@pytest.fixture(scope="function") +def rpl_config(default_conf): + default_conf['stake_currency'] = 'USDT' + + default_conf['exchange']['pair_whitelist'] = [ + 'ETH/USDT', + 'BTC/USDT', + ] + default_conf['exchange']['pair_blacklist'] = [ + 'BLK/USDT' + ] + return default_conf + + +def test_fetch_pairlist_mock_response_html(mocker, rpl_config): + mock_response = MagicMock() + mock_response.headers = {'content-type': 'text/html'} + mocker.patch('requests.get', return_value=mock_response) + + rpl_config['pairlists'] = [ + { + "method": "RemotePairList", + "pairlist_url": "http://example.com/pairlist", + "number_assets": 10, + "read_timeout": 10, + "keep_pairlist_on_failure": True, + } + ] + + exchange = get_patched_exchange(mocker, rpl_config) + pairlistmanager = PairListManager(exchange, rpl_config) + + mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", + return_value=mock_response) + remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, + rpl_config['pairlists'][0], 0) + + with 
pytest.raises(OperationalException, match='RemotePairList is not of type JSON abort'): + remote_pairlist.fetch_pairlist() + + +def test_remote_pairlist_init_no_pairlist_url(mocker, rpl_config): + + rpl_config['pairlists'] = [ + { + "method": "RemotePairList", + "number_assets": 10, + "keep_pairlist_on_failure": True, + } + ] + + get_patched_exchange(mocker, rpl_config) + with pytest.raises(OperationalException, match=r'`pairlist_url` not specified.' + r' Please check your configuration for "pairlist.config.pairlist_url"'): + get_patched_freqtradebot(mocker, rpl_config) + + +def test_remote_pairlist_init_no_number_assets(mocker, rpl_config): + + rpl_config['pairlists'] = [ + { + "method": "RemotePairList", + "pairlist_url": "http://example.com/pairlist", + "keep_pairlist_on_failure": True, + } + ] + + get_patched_exchange(mocker, rpl_config) + + with pytest.raises(OperationalException, match=r'`number_assets` not specified. ' + 'Please check your configuration for "pairlist.config.number_assets"'): + get_patched_freqtradebot(mocker, rpl_config) + + +def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): + + rpl_config['pairlists'] = [ + { + "method": "RemotePairList", + "pairlist_url": "http://example.com/pairlist", + "number_assets": 10, + "refresh_period": 10, + "read_timeout": 10, + "keep_pairlist_on_failure": True, + } + ] + + mock_response = MagicMock() + + mock_response.json.return_value = { + "pairs": ["ETH/BTC", "XRP/BTC", "LTC/BTC", "EOS/BTC"], + "info": "Mock pairlist response", + "refresh_period": 60 + } + + mock_response.headers = { + "content-type": "application/json" + } + + mock_response.elapsed.total_seconds.return_value = 0.4 + mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", + return_value=mock_response) + + exchange = get_patched_exchange(mocker, rpl_config) + pairlistmanager = PairListManager(exchange, rpl_config) + remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, + rpl_config['pairlists'][0], 0) + pairs, time_elapsed, info = remote_pairlist.fetch_pairlist() + + assert pairs == ["ETH/BTC", "XRP/BTC", "LTC/BTC", "EOS/BTC"] + assert time_elapsed == 0.4 + assert info == "Mock pairlist response" + assert remote_pairlist._refresh_period == 60 From 7f3524949c17afa87f52d8023770d8a974884b72 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Tue, 13 Dec 2022 21:00:23 +0100 Subject: [PATCH 343/421] - print --- freqtrade/plugins/pairlist/RemotePairList.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 418ac5b0b..e46ac0419 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -79,8 +79,6 @@ class RemotePairList(IPairList): content_type = response.headers.get('content-type') time_elapsed = response.elapsed.total_seconds() - print(response) - if "application/json" in str(content_type): jsonparse = response.json() pairlist = jsonparse['pairs'] From 97fee37072dd28a8981131523711cfc7cbb9a3b6 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 14 Dec 2022 07:22:41 +0100 Subject: [PATCH 344/421] Improve emc test --- freqtrade/rpc/external_message_consumer.py | 3 +-- tests/rpc/test_rpc_emc.py | 25 +++++++++++++++++++--- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index 278f04a8e..67b323fb2 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ 
b/freqtrade/rpc/external_message_consumer.py @@ -411,8 +411,7 @@ class ExternalMessageConsumer: # Set to None for all candles if we missed a full df's worth of candles n_missing = n_missing if n_missing < FULL_DATAFRAME_THRESHOLD else 1500 - logger.warning("Holes in data or no existing df, " - f"requesting {n_missing} candles " + logger.warning("Holes in data or no existing df, requesting {n_missing} candles " f"for {key} from `{producer_name}`") self.send_producer_request( diff --git a/tests/rpc/test_rpc_emc.py b/tests/rpc/test_rpc_emc.py index 155239e94..e1537ec9e 100644 --- a/tests/rpc/test_rpc_emc.py +++ b/tests/rpc/test_rpc_emc.py @@ -83,6 +83,7 @@ def test_emc_init(patched_emc): def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): test_producer = {"name": "test", "url": "ws://test", "ws_token": "test"} producer_name = test_producer['name'] + invalid_msg = r"Invalid message .+" caplog.set_level(logging.DEBUG) @@ -119,7 +120,8 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): malformed_message = {"type": "whitelist", "data": {"pair": "BTC/USDT"}} patched_emc.handle_producer_message(test_producer, malformed_message) - assert log_has_re(r"Invalid message .+", caplog) + assert log_has_re(invalid_msg, caplog) + caplog.clear() malformed_message = { "type": "analyzed_df", @@ -132,13 +134,30 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): patched_emc.handle_producer_message(test_producer, malformed_message) assert log_has(f"Received message of type `analyzed_df` from `{producer_name}`", caplog) - assert log_has_re(r"Invalid message .+", caplog) + assert log_has_re(invalid_msg, caplog) + caplog.clear() + + # Empty dataframe + malformed_message = { + "type": "analyzed_df", + "data": { + "key": ("BTC/USDT", "5m", "spot"), + "df": ohlcv_history.loc[ohlcv_history['open'] < 0], + "la": datetime.now(timezone.utc) + } + } + patched_emc.handle_producer_message(test_producer, malformed_message) + + assert log_has(f"Received message of type `analyzed_df` from `{producer_name}`", caplog) + assert not log_has_re(invalid_msg, caplog) + assert log_has_re(r"Received Empty Dataframe for.+", caplog) caplog.clear() malformed_message = {"some": "stuff"} patched_emc.handle_producer_message(test_producer, malformed_message) - assert log_has_re(r"Invalid message .+", caplog) + assert log_has_re(invalid_msg, caplog) + caplog.clear() caplog.clear() malformed_message = {"type": "whitelist", "data": None} From de19d1cfbba4f7ca0c356ed840093950c74f6434 Mon Sep 17 00:00:00 2001 From: initrv <37817561+initrv@users.noreply.github.com> Date: Wed, 14 Dec 2022 13:36:07 +0300 Subject: [PATCH 345/421] fix doc minimal_roi --- docs/strategy-customization.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/strategy-customization.md b/docs/strategy-customization.md index c006bf12c..0fb35ce89 100644 --- a/docs/strategy-customization.md +++ b/docs/strategy-customization.md @@ -363,9 +363,9 @@ class AwesomeStrategy(IStrategy): timeframe = "1d" timeframe_mins = timeframe_to_minutes(timeframe) minimal_roi = { - "0": 0.05, # 5% for the first 3 candles - str(timeframe_mins * 3)): 0.02, # 2% after 3 candles - str(timeframe_mins * 6)): 0.01, # 1% After 6 candles + "0": 0.05, # 5% for the first 3 candles + str(timeframe_mins * 3): 0.02, # 2% after 3 candles + str(timeframe_mins * 6): 0.01, # 1% After 6 candles } ``` From 2285ca7d2a214c811c17e371e4780216d70760dc Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 14 Dec 2022 18:22:20 +0100 
Subject: [PATCH 346/421] add dp to multiproc --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 6 ++++-- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 5e9b81108..b77f21d58 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -24,6 +24,7 @@ from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback from freqtrade.persistence import Trade +from freqtrade.data.dataprovider import DataProvider logger = logging.getLogger(__name__) @@ -384,7 +385,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, - config: Dict[str, Any] = {}) -> Callable: + config: Dict[str, Any] = {}, dp: DataProvider = None) -> Callable: """ Utility function for multiprocessed env. @@ -398,7 +399,8 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, def _init() -> gym.Env: env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config) + reward_kwargs=reward_params, id=env_id, seed=seed + rank, + config=config, dp=dp) if monitor: env = Monitor(env) return env diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 32a2a2076..c9b824978 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -37,14 +37,14 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): env_id = "train_env" self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config) for i + config=self.config, dp=self.data_provider) for i in range(self.max_threads)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config) for i + config=self.config, dp=self.data_provider) for i in range(self.max_threads)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), From dac1c8ab894c345649e158a14105bac8f76e2c35 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 14 Dec 2022 18:28:52 +0100 Subject: [PATCH 347/421] fix isort --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index b77f21d58..0231124ff 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -17,6 +17,7 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.data.dataprovider import DataProvider from freqtrade.exceptions 
import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel @@ -24,7 +25,6 @@ from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback from freqtrade.persistence import Trade -from freqtrade.data.dataprovider import DataProvider logger = logging.getLogger(__name__) From fa260e6560591d848189197362e69806396eb1bb Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 14 Dec 2022 19:56:54 +0100 Subject: [PATCH 348/421] Move "replace or append" decision to dataprovider --- freqtrade/constants.py | 1 + freqtrade/data/dataprovider.py | 29 ++++++++-- freqtrade/rpc/external_message_consumer.py | 67 ++++++++-------------- 3 files changed, 50 insertions(+), 47 deletions(-) diff --git a/freqtrade/constants.py b/freqtrade/constants.py index ca1be1d6a..ff6cc7c67 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -61,6 +61,7 @@ USERPATH_FREQAIMODELS = 'freqaimodels' TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent'] WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw'] +FULL_DATAFRAME_THRESHOLD = 100 ENV_VAR_PREFIX = 'FREQTRADE__' diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 10569e7c7..b46f4e881 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -12,7 +12,8 @@ from typing import Any, Dict, List, Optional, Tuple from pandas import DataFrame, to_timedelta from freqtrade.configuration import TimeRange -from freqtrade.constants import Config, ListPairsWithTimeframes, PairWithTimeframe +from freqtrade.constants import (FULL_DATAFRAME_THRESHOLD, Config, ListPairsWithTimeframes, + PairWithTimeframe) from freqtrade.data.history import load_pair_history from freqtrade.enums import CandleType, RPCMessageType, RunMode from freqtrade.exceptions import ExchangeError, OperationalException @@ -132,7 +133,7 @@ class DataProvider: 'data': pair_key, }) - def _add_external_df( + def _replace_external_df( self, pair: str, dataframe: DataFrame, @@ -158,7 +159,7 @@ class DataProvider: self.__producer_pairs_df[producer_name][pair_key] = (dataframe, _last_analyzed) logger.debug(f"External DataFrame for {pair_key} from {producer_name} added.") - def _add_external_candle( + def _add_external_df( self, pair: str, dataframe: DataFrame, @@ -182,6 +183,19 @@ class DataProvider: # The incoming dataframe must have at least 1 candle return (False, 0) + if len(dataframe) >= FULL_DATAFRAME_THRESHOLD: + # This is likely a full dataframe + # Add the dataframe to the dataprovider + self._add_external_df( + pair, + dataframe, + last_analyzed=last_analyzed, + timeframe=timeframe, + candle_type=candle_type, + producer_name=producer_name + ) + return (True, 0) + if (producer_name not in self.__producer_pairs_df or pair_key not in self.__producer_pairs_df[producer_name]): # We don't have data from this producer yet, @@ -214,7 +228,14 @@ class DataProvider: appended_df = append_candles_to_dataframe(existing_df1, dataframe) # Everything is good, we appended - self.__producer_pairs_df[producer_name][pair_key] = appended_df, last_analyzed + self._add_external_df( + pair, + appended_df, + last_analyzed=last_analyzed, + timeframe=timeframe, + candle_type=candle_type, + producer_name=producer_name + ) return (True, 0) def get_producer_df( diff --git a/freqtrade/rpc/external_message_consumer.py 
b/freqtrade/rpc/external_message_consumer.py index 67b323fb2..e888191ea 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -13,6 +13,7 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, TypedDict, Union import websockets from pydantic import ValidationError +from freqtrade.constants import FULL_DATAFRAME_THRESHOLD from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RPCMessageType from freqtrade.misc import remove_entry_exit_signals @@ -36,9 +37,6 @@ class Producer(TypedDict): ws_token: str -FULL_DATAFRAME_THRESHOLD = 100 - - logger = logging.getLogger(__name__) @@ -379,51 +377,34 @@ class ExternalMessageConsumer: logger.debug(f"Received {len(df)} candle(s) for {key}") - if len(df) >= FULL_DATAFRAME_THRESHOLD: - # This is likely a full dataframe - # Add the dataframe to the dataprovider - self._dp._add_external_df( - pair, - df, - last_analyzed=la, - timeframe=timeframe, - candle_type=candle_type, - producer_name=producer_name + did_append, n_missing = self._dp._add_external_df( + pair, + df, + last_analyzed=la, + timeframe=timeframe, + candle_type=candle_type, + producer_name=producer_name ) - elif len(df) < FULL_DATAFRAME_THRESHOLD: - # This is likely n single candles - # Have dataprovider append it to - # the full datafame. If it can't, - # request the missing candles - did_append, n_missing = self._dp._add_external_candle( - pair, - df, - last_analyzed=la, - timeframe=timeframe, - candle_type=candle_type, - producer_name=producer_name - ) + if not did_append: + # We want an overlap in candles incase some data has changed + n_missing += 1 + # Set to None for all candles if we missed a full df's worth of candles + n_missing = n_missing if n_missing < FULL_DATAFRAME_THRESHOLD else 1500 - if not did_append: - # We want an overlap in candles incase some data has changed - n_missing += 1 - # Set to None for all candles if we missed a full df's worth of candles - n_missing = n_missing if n_missing < FULL_DATAFRAME_THRESHOLD else 1500 + logger.warning(f"Holes in data or no existing df, requesting {n_missing} candles " + f"for {key} from `{producer_name}`") - logger.warning("Holes in data or no existing df, requesting {n_missing} candles " - f"for {key} from `{producer_name}`") - - self.send_producer_request( - producer_name, - WSAnalyzedDFRequest( - data={ - "limit": n_missing, - "pair": pair - } - ) + self.send_producer_request( + producer_name, + WSAnalyzedDFRequest( + data={ + "limit": n_missing, + "pair": pair + } ) - return + ) + return logger.debug( f"Consumed message from `{producer_name}` " From 2018da07677f6343bef7a28eb8c4782032fbb508 Mon Sep 17 00:00:00 2001 From: Emre Date: Wed, 14 Dec 2022 22:03:05 +0300 Subject: [PATCH 349/421] Add env_info dict to base environment --- freqtrade/freqai/RL/BaseEnvironment.py | 17 +++++------------ .../freqai/RL/BaseReinforcementLearningModel.py | 16 +++++++++++----- .../ReinforcementLearner_multiproc.py | 11 +++++++++-- 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 5a5a950e7..887910006 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -11,9 +11,6 @@ from gym import spaces from gym.utils import seeding from pandas import DataFrame -from freqtrade.data.dataprovider import DataProvider -from freqtrade.enums import RunMode - logger = logging.getLogger(__name__) @@ -48,7 +45,7 @@ class 
BaseEnvironment(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1, config: dict = {}, - dp: Optional[DataProvider] = None): + env_info: dict = {}): """ Initializes the training/eval environment. :param df: dataframe of features @@ -59,7 +56,7 @@ class BaseEnvironment(gym.Env): :param id: string id of the environment (used in backend for multiprocessed env) :param seed: Sets the seed of the environment higher in the gym.Env object :param config: Typical user configuration file - :param dp: dataprovider from freqtrade + :param env_info: Environment info dictionary, used to pass live status, fee, etc. """ self.config = config self.rl_config = config['freqai']['rl_config'] @@ -71,17 +68,13 @@ class BaseEnvironment(gym.Env): self.compound_trades = config['stake_amount'] == 'unlimited' if self.config.get('fee', None) is not None: self.fee = self.config['fee'] - elif dp is not None: - self.fee = dp._exchange.get_fee(symbol=dp.current_whitelist()[0]) # type: ignore else: - self.fee = 0.0015 + self.fee = env_info.get('fee', 0.0015) # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions self.tensorboard_metrics: dict = {} - self.live: bool = False - if dp: - self.live = dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) + self.live = env_info.get('live', False) if not self.live and self.add_state_info: self.add_state_info = False logger.warning("add_state_info is not available in backtesting. Deactivating.") @@ -213,7 +206,7 @@ class BaseEnvironment(gym.Env): """ features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] - if self.add_state_info and self.live: + if self.add_state_info: features_and_state = DataFrame(np.zeros((len(features_window), 3)), columns=['current_profit_pct', 'position', diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index b77f21d58..a41f02cba 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -17,6 +17,7 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel @@ -24,7 +25,6 @@ from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback from freqtrade.persistence import Trade -from freqtrade.data.dataprovider import DataProvider logger = logging.getLogger(__name__) @@ -144,18 +144,24 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] + env_info = {"live": False} + if self.data_provider: + env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE) + env_info["fee"] = self.data_provider._exchange \ + .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, 
config=self.config, - dp=self.data_provider) + env_info=env_info) self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config, - dp=self.data_provider)) + env_info=env_info)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -385,7 +391,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, - config: Dict[str, Any] = {}, dp: DataProvider = None) -> Callable: + config: Dict[str, Any] = {}, env_info: Dict[str, Any] = {}) -> Callable: """ Utility function for multiprocessed env. @@ -400,7 +406,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, env = MyRLEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params, id=env_id, seed=seed + rank, - config=config, dp=dp) + config=config, env_info=env_info) if monitor: env = Monitor(env) return env diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index c9b824978..58735e78f 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -5,6 +5,7 @@ from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env @@ -34,17 +35,23 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] + env_info = {"live": False} + if self.data_provider: + env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE) + env_info["fee"] = self.data_provider._exchange \ + .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + env_id = "train_env" self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config, dp=self.data_provider) for i + config=self.config, env_info=env_info) for i in range(self.max_threads)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config, dp=self.data_provider) for i + config=self.config, env_info=env_info) for i in range(self.max_threads)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), From 3af2251ce86aee7a72fe659c6964338c412fadf7 Mon Sep 17 00:00:00 2001 From: Emre Date: Wed, 14 Dec 2022 22:03:23 +0300 Subject: [PATCH 350/421] Fix add_state_info backtesting bug --- freqtrade/freqai/RL/BaseEnvironment.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 887910006..49361cbde 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py 
@@ -62,8 +62,6 @@ class BaseEnvironment(gym.Env): self.rl_config = config['freqai']['rl_config'] self.add_state_info = self.rl_config.get('add_state_info', False) self.id = id - self.seed(seed) - self.reset_env(df, prices, window_size, reward_kwargs, starting_point) self.max_drawdown = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8) self.compound_trades = config['stake_amount'] == 'unlimited' if self.config.get('fee', None) is not None: @@ -78,6 +76,8 @@ class BaseEnvironment(gym.Env): if not self.live and self.add_state_info: self.add_state_info = False logger.warning("add_state_info is not available in backtesting. Deactivating.") + self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): From ca2a878b86b32d5c81abd4276c7de7c907f25a69 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 14 Dec 2022 19:58:45 +0100 Subject: [PATCH 351/421] Update test naming --- freqtrade/data/dataprovider.py | 4 ++-- tests/data/test_dataprovider.py | 29 +++++++++++++++-------------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index b46f4e881..df4a4c898 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -186,7 +186,7 @@ class DataProvider: if len(dataframe) >= FULL_DATAFRAME_THRESHOLD: # This is likely a full dataframe # Add the dataframe to the dataprovider - self._add_external_df( + self._replace_external_df( pair, dataframe, last_analyzed=last_analyzed, @@ -228,7 +228,7 @@ class DataProvider: appended_df = append_candles_to_dataframe(existing_df1, dataframe) # Everything is good, we appended - self._add_external_df( + self._replace_external_df( pair, appended_df, last_analyzed=last_analyzed, diff --git a/tests/data/test_dataprovider.py b/tests/data/test_dataprovider.py index cce483c07..7d61a22be 100644 --- a/tests/data/test_dataprovider.py +++ b/tests/data/test_dataprovider.py @@ -161,9 +161,9 @@ def test_producer_pairs(mocker, default_conf, ohlcv_history): assert dataprovider.get_producer_pairs("bad") == [] -def test_get_producer_df(mocker, default_conf, ohlcv_history): +def test_get_producer_df(mocker, default_conf): dataprovider = DataProvider(default_conf, None) - + ohlcv_history = generate_test_data('5m', 150) pair = 'BTC/USDT' timeframe = default_conf['timeframe'] candle_type = CandleType.SPOT @@ -414,27 +414,28 @@ def test_dp_send_msg(default_conf): assert msg not in dp._msg_queue -def test_dp__add_external_candle(default_conf_usdt): +def test_dp__add_external_df(default_conf_usdt): timeframe = '1h' default_conf_usdt["timeframe"] = timeframe dp = DataProvider(default_conf_usdt, None) df = generate_test_data(timeframe, 24, '2022-01-01 00:00:00+00:00') last_analyzed = datetime.now(timezone.utc) - res = dp._add_external_candle('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False # Why 1000 ?? 
assert res[1] == 1000 - dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + # Hard add dataframe + dp._replace_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) # BTC is not stored yet - res = dp._add_external_candle('BTC/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('BTC/USDT', df, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False - df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) - assert len(df) == 24 + df_res, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + assert len(df_res) == 24 # Add the same dataframe again - dataframe size shall not change. - res = dp._add_external_candle('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is True assert res[1] == 0 df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) @@ -443,7 +444,7 @@ def test_dp__add_external_candle(default_conf_usdt): # Add a new day. df2 = generate_test_data(timeframe, 24, '2022-01-02 00:00:00+00:00') - res = dp._add_external_candle('ETH/USDT', df2, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('ETH/USDT', df2, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is True assert res[1] == 0 df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) @@ -452,7 +453,7 @@ def test_dp__add_external_candle(default_conf_usdt): # Add a dataframe with a 12 hour offset - so 12 candles are overlapping, and 12 valid. df3 = generate_test_data(timeframe, 24, '2022-01-02 12:00:00+00:00') - res = dp._add_external_candle('ETH/USDT', df3, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('ETH/USDT', df3, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is True assert res[1] == 0 df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) @@ -463,7 +464,7 @@ def test_dp__add_external_candle(default_conf_usdt): # Generate 1 new candle df4 = generate_test_data(timeframe, 1, '2022-01-03 12:00:00+00:00') - res = dp._add_external_candle('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) # assert res[0] is True # assert res[1] == 0 df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) @@ -474,7 +475,7 @@ def test_dp__add_external_candle(default_conf_usdt): # Gap in the data ... 
df4 = generate_test_data(timeframe, 1, '2022-01-05 00:00:00+00:00') - res = dp._add_external_candle('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False # 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00 assert res[1] == 36 @@ -484,7 +485,7 @@ def test_dp__add_external_candle(default_conf_usdt): # Empty dataframe df4 = generate_test_data(timeframe, 0, '2022-01-05 00:00:00+00:00') - res = dp._add_external_candle('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False # 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00 assert res[1] == 0 From 33dce5cf1024aa506a0e57d8226136b0db434d81 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 15 Dec 2022 06:51:15 +0100 Subject: [PATCH 352/421] Clarify partial exit calculation messaging --- docs/strategy-callbacks.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/strategy-callbacks.md b/docs/strategy-callbacks.md index 230968fb0..19bd26a04 100644 --- a/docs/strategy-callbacks.md +++ b/docs/strategy-callbacks.md @@ -773,7 +773,7 @@ class DigDeeperStrategy(IStrategy): * Sell 100@10\$ -> Avg price: 8.5\$, realized profit 150\$, 17.65% * Buy 150@11\$ -> Avg price: 10\$, realized profit 150\$, 17.65% * Sell 100@12\$ -> Avg price: 10\$, total realized profit 350\$, 20% - * Sell 150@14\$ -> Avg price: 10\$, total realized profit 950\$, 40% + * Sell 150@14\$ -> Avg price: 10\$, total realized profit 950\$, 40% <- *This will be the last "Exit" message* The total profit for this trade was 950$ on a 3350$ investment (`100@8$ + 100@9$ + 150@11$`). As such - the final relative profit is 28.35% (`950 / 3350`). From 7a0eadbdf5013c967d45c185da510c231e11dbe9 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 15 Dec 2022 07:04:59 +0100 Subject: [PATCH 353/421] Don't recalc profit on closed trades --- freqtrade/rpc/rpc.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 334e18dc7..dae23d388 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -167,6 +167,7 @@ class RPC: results = [] for trade in trades: order: Optional[Order] = None + current_profit_fiat: Optional[float] = None if trade.open_order_id: order = trade.select_order_by_order_id(trade.open_order_id) # calculate profit and send message to user @@ -176,23 +177,26 @@ class RPC: trade.pair, side='exit', is_short=trade.is_short, refresh=False) except (ExchangeError, PricingError): current_rate = NAN + if len(trade.select_filled_orders(trade.entry_side)) > 0: + current_profit = trade.calc_profit_ratio( + current_rate) if not isnan(current_rate) else NAN + current_profit_abs = trade.calc_profit( + current_rate) if not isnan(current_rate) else NAN + else: + current_profit = current_profit_abs = current_profit_fiat = 0.0 else: + # Closed trade ... 
current_rate = trade.close_rate - if len(trade.select_filled_orders(trade.entry_side)) > 0: - current_profit = trade.calc_profit_ratio( - current_rate) if not isnan(current_rate) else NAN - current_profit_abs = trade.calc_profit( - current_rate) if not isnan(current_rate) else NAN - current_profit_fiat: Optional[float] = None - # Calculate fiat profit - if self._fiat_converter: - current_profit_fiat = self._fiat_converter.convert_amount( - current_profit_abs, - self._freqtrade.config['stake_currency'], - self._freqtrade.config['fiat_display_currency'] - ) - else: - current_profit = current_profit_abs = current_profit_fiat = 0.0 + current_profit = trade.close_profit + current_profit_abs = trade.close_profit_abs + + # Calculate fiat profit + if not isnan(current_profit_abs) and self._fiat_converter: + current_profit_fiat = self._fiat_converter.convert_amount( + current_profit_abs, + self._freqtrade.config['stake_currency'], + self._freqtrade.config['fiat_display_currency'] + ) # Calculate guaranteed profit (in case of trailing stop) stoploss_entry_dist = trade.calc_profit(trade.stop_loss) From 7b4abd5ef50f3c6f84c6604fc1f79ff4b92c2575 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Dec 2022 12:25:33 +0100 Subject: [PATCH 354/421] use a dictionary to make code more readable --- freqtrade/freqai/RL/BaseEnvironment.py | 8 ++-- .../RL/BaseReinforcementLearningModel.py | 40 ++++++++++--------- .../ReinforcementLearner_multiproc.py | 18 ++++----- 3 files changed, 32 insertions(+), 34 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 49361cbde..39e8609f5 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -44,8 +44,8 @@ class BaseEnvironment(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}, - env_info: dict = {}): + id: str = 'baseenv-1', seed: int = 1, config: dict = {}, live: bool = False, + fee: float = 0.0015): """ Initializes the training/eval environment. :param df: dataframe of features @@ -67,12 +67,12 @@ class BaseEnvironment(gym.Env): if self.config.get('fee', None) is not None: self.fee = self.config['fee'] else: - self.fee = env_info.get('fee', 0.0015) + self.fee = fee # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions self.tensorboard_metrics: dict = {} - self.live = env_info.get('live', False) + self.live = live if not self.live and self.add_state_info: self.add_state_info = False logger.warning("add_state_info is not available in backtesting. 
Deactivating.") diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a41f02cba..62963f194 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -17,7 +17,6 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel @@ -144,24 +143,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - env_info = {"live": False} - if self.data_provider: - env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE) - env_info["fee"] = self.data_provider._exchange \ - .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + env_info = self.pack_env_dict() self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, - config=self.config, - env_info=env_info) + **env_info) self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, - config=self.config, - env_info=env_info)) + **env_info)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -169,6 +158,20 @@ class BaseReinforcementLearningModel(IFreqaiModel): actions = self.train_env.get_actions() self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) + def pack_env_dict(self) -> Dict[str, Any]: + """ + Create dictionary of environment arguments + """ + env_info = {"window_size": self.CONV_WIDTH, + "reward_kwargs": self.reward_params, + "config": self.config, + "live": self.live} + if self.data_provider: + env_info["fee"] = self.data_provider._exchange \ + .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + + return env_info + @abstractmethod def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): """ @@ -390,8 +393,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, - reward_params: Dict[str, int], window_size: int, monitor: bool = False, - config: Dict[str, Any] = {}, env_info: Dict[str, Any] = {}) -> Callable: + monitor: bool = False, + env_info: Dict[str, Any] = {}) -> Callable: """ Utility function for multiprocessed env. 
@@ -404,9 +407,8 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, def _init() -> gym.Env: - env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank, - config=config, env_info=env_info) + env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank, + **env_info) if monitor: env = Monitor(env) return env diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 58735e78f..a9be87b0b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -5,7 +5,6 @@ from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env @@ -35,23 +34,20 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - env_info = {"live": False} - if self.data_provider: - env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE) - env_info["fee"] = self.data_provider._exchange \ - .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + env_info = self.pack_env_dict() env_id = "train_env" - self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config, env_info=env_info) for i + self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, + train_df, prices_train, + monitor=True, + env_info=env_info) for i in range(self.max_threads)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config, env_info=env_info) for i + monitor=True, + env_info=env_info) for i in range(self.max_threads)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), From 581a5296cc7a76ea927eec9157559e426f170daa Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Dec 2022 16:50:08 +0100 Subject: [PATCH 355/421] fix docstrings to reflect new env_info changes --- freqtrade/freqai/RL/BaseEnvironment.py | 3 ++- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 39e8609f5..17d82a3ba 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -56,7 +56,8 @@ class BaseEnvironment(gym.Env): :param id: string id of the environment (used in backend for multiprocessed env) :param seed: Sets the seed of the environment higher in the gym.Env object :param config: Typical user configuration file - :param env_info: Environment info dictionary, used to pass live status, fee, etc. + :param live: Whether or not this environment is active in dry/live/backtesting + :param fee: The fee to use for environmental interactions. 
""" self.config = config self.rl_config = config['freqai']['rl_config'] diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 62963f194..d7e3a3cad 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -402,6 +402,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, :param num_env: (int) the number of environment you wish to have in subprocesses :param seed: (int) the inital seed for RNG :param rank: (int) index of the subprocess + :param env_info: (dict) all required arguments to instantiate the environment. :return: (Callable) """ From 1d5c66da3bcb212732df322efb74d54eca069ca0 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Thu, 15 Dec 2022 17:38:21 +0100 Subject: [PATCH 356/421] + Unit Tests --- tests/plugins/test_remotepairlist.py | 72 ++++++++++++++++++++++++++-- 1 file changed, 68 insertions(+), 4 deletions(-) diff --git a/tests/plugins/test_remotepairlist.py b/tests/plugins/test_remotepairlist.py index 743534bc3..bc4adb616 100644 --- a/tests/plugins/test_remotepairlist.py +++ b/tests/plugins/test_remotepairlist.py @@ -1,11 +1,13 @@ +import json from unittest.mock import MagicMock import pytest +import requests from freqtrade.exceptions import OperationalException from freqtrade.plugins.pairlist.RemotePairList import RemotePairList from freqtrade.plugins.pairlistmanager import PairListManager -from tests.conftest import get_patched_exchange, get_patched_freqtradebot +from tests.conftest import get_patched_exchange, get_patched_freqtradebot, log_has @pytest.fixture(scope="function") @@ -22,10 +24,44 @@ def rpl_config(default_conf): return default_conf +def test_gen_pairlist_with_local_file(mocker, rpl_config): + + mock_file = MagicMock() + mock_file.read.return_value = '{"pairs": ["TKN/USDT","ETH/USDT","NANO/USDT"]}' + mocker.patch('freqtrade.plugins.pairlist.RemotePairList.open', return_value=mock_file) + + mock_file_path = mocker.patch('freqtrade.plugins.pairlist.RemotePairList.Path') + mock_file_path.exists.return_value = True + + jsonparse = json.loads(mock_file.read.return_value) + mocker.patch('freqtrade.plugins.pairlist.RemotePairList.json.load', return_value=jsonparse) + + rpl_config['pairlists'] = [ + { + "method": "RemotePairList", + 'number_assets': 2, + 'refresh_period': 1800, + 'keep_pairlist_on_failure': True, + 'pairlist_url': 'file:///pairlist.json', + 'bearer_token': '', + 'read_timeout': 60 + } + ] + + exchange = get_patched_exchange(mocker, rpl_config) + pairlistmanager = PairListManager(exchange, rpl_config) + + remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, + rpl_config['pairlists'][0], 0) + + result = remote_pairlist.gen_pairlist([]) + + assert result == ['TKN/USDT', 'ETH/USDT'] + + def test_fetch_pairlist_mock_response_html(mocker, rpl_config): mock_response = MagicMock() mock_response.headers = {'content-type': 'text/html'} - mocker.patch('requests.get', return_value=mock_response) rpl_config['pairlists'] = [ { @@ -49,6 +85,34 @@ def test_fetch_pairlist_mock_response_html(mocker, rpl_config): remote_pairlist.fetch_pairlist() +def test_fetch_pairlist_timeout_keep_last_pairlist(mocker, rpl_config, caplog): + rpl_config['pairlists'] = [ + { + "method": "RemotePairList", + "pairlist_url": "http://example.com/pairlist", + "number_assets": 10, + "read_timeout": 10, + "keep_pairlist_on_failure": True, + } + ] + + exchange = get_patched_exchange(mocker, rpl_config) + 
pairlistmanager = PairListManager(exchange, rpl_config) + + mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", + side_effect=requests.exceptions.RequestException) + + remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, + rpl_config['pairlists'][0], 0) + + remote_pairlist._last_pairlist = ["BTC/USDT", "ETH/USDT", "LTC/USDT"] + + pairs, time_elapsed, info = remote_pairlist.fetch_pairlist() + assert log_has(f"Was not able to fetch pairlist from: {remote_pairlist._pairlist_url} ", caplog) + assert log_has("Keeping last fetched pairlist", caplog) + assert pairs == ["BTC/USDT", "ETH/USDT", "LTC/USDT"] + + def test_remote_pairlist_init_no_pairlist_url(mocker, rpl_config): rpl_config['pairlists'] = [ @@ -98,7 +162,7 @@ def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): mock_response = MagicMock() mock_response.json.return_value = { - "pairs": ["ETH/BTC", "XRP/BTC", "LTC/BTC", "EOS/BTC"], + "pairs": ["ETH/USDT", "XRP/USDT", "LTC/USDT", "EOS/USDT"], "info": "Mock pairlist response", "refresh_period": 60 } @@ -117,7 +181,7 @@ def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): rpl_config['pairlists'][0], 0) pairs, time_elapsed, info = remote_pairlist.fetch_pairlist() - assert pairs == ["ETH/BTC", "XRP/BTC", "LTC/BTC", "EOS/BTC"] + assert pairs == ["ETH/USDT", "XRP/USDT", "LTC/USDT", "EOS/USDT"] assert time_elapsed == 0.4 assert info == "Mock pairlist response" assert remote_pairlist._refresh_period == 60 From cd1b8b9cee37a0ac412a57af51f02348b40d9565 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Thu, 15 Dec 2022 18:14:37 +0100 Subject: [PATCH 357/421] single space removed for the unit test to pass.. --- tests/plugins/test_remotepairlist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/plugins/test_remotepairlist.py b/tests/plugins/test_remotepairlist.py index bc4adb616..fc91d3f06 100644 --- a/tests/plugins/test_remotepairlist.py +++ b/tests/plugins/test_remotepairlist.py @@ -108,7 +108,7 @@ def test_fetch_pairlist_timeout_keep_last_pairlist(mocker, rpl_config, caplog): remote_pairlist._last_pairlist = ["BTC/USDT", "ETH/USDT", "LTC/USDT"] pairs, time_elapsed, info = remote_pairlist.fetch_pairlist() - assert log_has(f"Was not able to fetch pairlist from: {remote_pairlist._pairlist_url} ", caplog) + assert log_has(f"Was not able to fetch pairlist from: {remote_pairlist._pairlist_url}", caplog) assert log_has("Keeping last fetched pairlist", caplog) assert pairs == ["BTC/USDT", "ETH/USDT", "LTC/USDT"] From 6fa3db3a1dc79dfd36b121c5b3f41cc8811ad487 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 15 Dec 2022 19:36:21 +0100 Subject: [PATCH 358/421] Fix failing tests --- tests/plugins/test_pairlist.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/plugins/test_pairlist.py b/tests/plugins/test_pairlist.py index ecc1da3e3..739c3a7ac 100644 --- a/tests/plugins/test_pairlist.py +++ b/tests/plugins/test_pairlist.py @@ -22,6 +22,11 @@ from tests.conftest import (create_mock_trades_usdt, get_patched_exchange, get_p log_has, log_has_re, num_log_has) +# Exclude RemotePairList from tests. +# It has a mandatory parameter, and requires special handling, which happens in test_remotepairlist. 
+TESTABLE_PAIRLISTS = [p for p in AVAILABLE_PAIRLISTS if p not in ['RemotePairList']] + + @pytest.fixture(scope="function") def whitelist_conf(default_conf): default_conf['stake_currency'] = 'BTC' @@ -824,7 +829,7 @@ def test_pair_whitelist_not_supported_Spread(mocker, default_conf, tickers) -> N get_patched_freqtradebot(mocker, default_conf) -@pytest.mark.parametrize("pairlist", AVAILABLE_PAIRLISTS) +@pytest.mark.parametrize("pairlist", TESTABLE_PAIRLISTS) def test_pairlist_class(mocker, whitelist_conf, markets, pairlist): whitelist_conf['pairlists'][0]['method'] = pairlist mocker.patch.multiple('freqtrade.exchange.Exchange', @@ -839,7 +844,7 @@ def test_pairlist_class(mocker, whitelist_conf, markets, pairlist): assert isinstance(freqtrade.pairlists.blacklist, list) -@pytest.mark.parametrize("pairlist", AVAILABLE_PAIRLISTS) +@pytest.mark.parametrize("pairlist", TESTABLE_PAIRLISTS) @pytest.mark.parametrize("whitelist,log_message", [ (['ETH/BTC', 'TKN/BTC'], ""), # TRX/ETH not in markets @@ -872,7 +877,7 @@ def test__whitelist_for_active_markets(mocker, whitelist_conf, markets, pairlist assert log_message in caplog.text -@pytest.mark.parametrize("pairlist", AVAILABLE_PAIRLISTS) +@pytest.mark.parametrize("pairlist", TESTABLE_PAIRLISTS) def test__whitelist_for_active_markets_empty(mocker, whitelist_conf, pairlist, tickers): whitelist_conf['pairlists'][0]['method'] = pairlist From 32d57f624e06790e4a4ddc4bba493a72ce64ab3c Mon Sep 17 00:00:00 2001 From: Sam Germain Date: Thu, 15 Dec 2022 15:00:27 -0500 Subject: [PATCH 359/421] delisted bibox following ccxt PR https://github.com/ccxt/ccxt/pull/16067 --- freqtrade/exchange/__init__.py | 1 - freqtrade/exchange/bibox.py | 28 ---------------------------- tests/exchange/test_exchange.py | 3 --- 3 files changed, 32 deletions(-) delete mode 100644 freqtrade/exchange/bibox.py diff --git a/freqtrade/exchange/__init__.py b/freqtrade/exchange/__init__.py index 9aed5c507..973ed499b 100644 --- a/freqtrade/exchange/__init__.py +++ b/freqtrade/exchange/__init__.py @@ -3,7 +3,6 @@ from freqtrade.exchange.common import remove_credentials, MAP_EXCHANGE_CHILDCLASS from freqtrade.exchange.exchange import Exchange # isort: on -from freqtrade.exchange.bibox import Bibox from freqtrade.exchange.binance import Binance from freqtrade.exchange.bitpanda import Bitpanda from freqtrade.exchange.bittrex import Bittrex diff --git a/freqtrade/exchange/bibox.py b/freqtrade/exchange/bibox.py deleted file mode 100644 index da1effbfe..000000000 --- a/freqtrade/exchange/bibox.py +++ /dev/null @@ -1,28 +0,0 @@ -""" Bibox exchange subclass """ -import logging -from typing import Dict - -from freqtrade.exchange import Exchange - - -logger = logging.getLogger(__name__) - - -class Bibox(Exchange): - """ - Bibox exchange class. Contains adjustments needed for Freqtrade to work - with this exchange. - - Please note that this exchange is not included in the list of exchanges - officially supported by the Freqtrade development team. So some features - may still not work as expected. - """ - - # fetchCurrencies API point requires authentication for Bibox, - # so switch it off for Freqtrade load_markets() - @property - def _ccxt_config(self) -> Dict: - # Parameters to add directly to ccxt sync/async initialization. 
- config = {"has": {"fetchCurrencies": False}} - config.update(super()._ccxt_config) - return config diff --git a/tests/exchange/test_exchange.py b/tests/exchange/test_exchange.py index e61ad8532..280e20ff0 100644 --- a/tests/exchange/test_exchange.py +++ b/tests/exchange/test_exchange.py @@ -4014,9 +4014,6 @@ def test_validate_trading_mode_and_margin_mode( ("binance", "spot", {}), ("binance", "margin", {"options": {"defaultType": "margin"}}), ("binance", "futures", {"options": {"defaultType": "future"}}), - ("bibox", "spot", {"has": {"fetchCurrencies": False}}), - ("bibox", "margin", {"has": {"fetchCurrencies": False}, "options": {"defaultType": "margin"}}), - ("bibox", "futures", {"has": {"fetchCurrencies": False}, "options": {"defaultType": "swap"}}), ("bybit", "spot", {"options": {"defaultType": "spot"}}), ("bybit", "futures", {"options": {"defaultType": "linear"}}), ("gateio", "futures", {"options": {"defaultType": "swap"}}), From 935275010f37738efc4667bff608762d89db0559 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 16 Dec 2022 06:46:44 +0100 Subject: [PATCH 360/421] Remove some unused fixtures --- tests/data/test_dataprovider.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/data/test_dataprovider.py b/tests/data/test_dataprovider.py index 7d61a22be..e0c79d52a 100644 --- a/tests/data/test_dataprovider.py +++ b/tests/data/test_dataprovider.py @@ -144,7 +144,7 @@ def test_available_pairs(mocker, default_conf, ohlcv_history): assert dp.available_pairs == [("XRP/BTC", timeframe), ("UNITTEST/BTC", timeframe), ] -def test_producer_pairs(mocker, default_conf, ohlcv_history): +def test_producer_pairs(default_conf): dataprovider = DataProvider(default_conf, None) producer = "default" @@ -161,7 +161,7 @@ def test_producer_pairs(mocker, default_conf, ohlcv_history): assert dataprovider.get_producer_pairs("bad") == [] -def test_get_producer_df(mocker, default_conf): +def test_get_producer_df(default_conf): dataprovider = DataProvider(default_conf, None) ohlcv_history = generate_test_data('5m', 150) pair = 'BTC/USDT' @@ -221,7 +221,7 @@ def test_emit_df(mocker, default_conf, ohlcv_history): assert send_mock.call_count == 0 -def test_refresh(mocker, default_conf, ohlcv_history): +def test_refresh(mocker, default_conf): refresh_mock = MagicMock() mocker.patch("freqtrade.exchange.Exchange.refresh_latest_ohlcv", refresh_mock) From 36948e2a7480326a4bd786bc6ff72c2eb4c52fd5 Mon Sep 17 00:00:00 2001 From: initrv Date: Fri, 16 Dec 2022 14:14:05 +0300 Subject: [PATCH 361/421] fix base4 env done condition --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index a3ebfdbfa..8f45028b1 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -88,7 +88,8 @@ class Base4ActionRLEnv(BaseEnvironment): {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8): + if (self._total_profit < self.max_drawdown or + self._total_unrealized_profit < self.max_drawdown): self._done = True self._position_history.append(self._position) From e4284f4e7b1d7f96cb61d113b9496166b732f28c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 16 Dec 2022 15:20:46 +0100 Subject: [PATCH 362/421] add citation to freqai doc. 
Update credits --- docs/freqai.md | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index efa279704..80096996d 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -72,11 +72,25 @@ pip install -r requirements-freqai.txt If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker-compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices. - ### FreqAI position in open-source machine learning landscape Forecasting chaotic time-series based systems, such as equity/cryptocurrency markets, requires a broad set of tools geared toward testing a wide range of hypotheses. Fortunately, a recent maturation of robust machine learning libraries (e.g. `scikit-learn`) has opened up a wide range of research possibilities. Scientists from a diverse range of fields can now easily prototype their studies on an abundance of established machine learning algorithms. Similarly, these user-friendly libraries enable "citzen scientists" to use their basic Python skills for data-exploration. However, leveraging these machine learning libraries on historical and live chaotic data sources can be logistically difficult and expensive. Additionally, robust data-collection, storage, and handling presents a disparate challenge. [`FreqAI`](#freqai) aims to provide a generalized and extensible open-sourced framework geared toward live deployments of adaptive modeling for market forecasting. The `FreqAI` framework is effectively a sandbox for the rich world of open-source machine learning libraries. Inside the `FreqAI` sandbox, users find they can combine a wide variety of third-party libraries to test creative hypotheses on a free live 24/7 chaotic data source - cryptocurrency exchange data. +### Citing FreqAI + +FreqAI is published in the Journal of Open Source Software [link](https://joss.theoj.org/papers/10.21105/joss.04864). If you find FreqAI useful in your research, please use the following citation: + +```bibtex +@article{Caulk2022, + doi = {10.21105/joss.04864}, + url = {https://doi.org/10.21105/joss.04864}, + year = {2022}, publisher = {The Open Journal}, + volume = {7}, number = {80}, pages = {4864}, + author = {Robert A. Caulk and Elin Törnquist and Matthias Voppichler and Andrew R. Lawless and Ryan McMullan and Wagner Costa Santos and Timothy C. Pogue and Johan van der Vlugt and Stefan P. Gehring and Pascal Schmidt}, + title = {FreqAI: generalizing adaptive modeling for chaotic time-series market forecasts}, + journal = {Journal of Open Source Software} } +``` + ## Common pitfalls FreqAI cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically). 
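As an illustration of the pitfall above, a FreqAI-compatible setup pins the pair whitelist statically. The fragment below is a hypothetical example written as the Python-dict equivalent of the JSON configuration, with placeholder pairs.

```python
# Hypothetical, minimal configuration fragment (Python-dict form of the JSON config).
# A fixed whitelist plus StaticPairList keeps the pair set constant, which is what
# FreqAI expects; a dynamic VolumePairList would add/remove pairs between retrains.
config_fragment = {
    "exchange": {
        "pair_whitelist": ["BTC/USDT", "ETH/USDT", "XRP/USDT"],  # placeholder pairs
    },
    "pairlists": [
        {"method": "StaticPairList"},
    ],
}
```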
@@ -99,6 +113,8 @@ Code review and software architecture brainstorming: Software development: Wagner Costa @wagnercosta +Emre Suzen @aemr3 +Timothy Pogue @wizrds Beta testing and bug reporting: -Stefan Gehring @bloodhunter4rc, @longyu, Andrew Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau, Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza, Timothy Pogue @wizrds +Stefan Gehring @bloodhunter4rc, @longyu, Andrew Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau, Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza From dde363343c4932572a013c50df9effddd47282ad Mon Sep 17 00:00:00 2001 From: Emre Date: Fri, 16 Dec 2022 22:16:19 +0300 Subject: [PATCH 363/421] Add can_short param to base env --- freqtrade/freqai/RL/BaseEnvironment.py | 4 +++- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 3 ++- freqtrade/freqai/freqai_interface.py | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 17d82a3ba..ef1c02a3b 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -45,7 +45,7 @@ class BaseEnvironment(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1, config: dict = {}, live: bool = False, - fee: float = 0.0015): + fee: float = 0.0015, can_short: bool = False): """ Initializes the training/eval environment. :param df: dataframe of features @@ -58,6 +58,7 @@ class BaseEnvironment(gym.Env): :param config: Typical user configuration file :param live: Whether or not this environment is active in dry/live/backtesting :param fee: The fee to use for environmental interactions. 
+ :param can_short: Whether or not the environment can short """ self.config = config self.rl_config = config['freqai']['rl_config'] @@ -73,6 +74,7 @@ class BaseEnvironment(gym.Env): # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions self.tensorboard_metrics: dict = {} + self.can_short = can_short self.live = live if not self.live and self.add_state_info: self.add_state_info = False diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index d7e3a3cad..af0726c0b 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -165,7 +165,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): env_info = {"window_size": self.CONV_WIDTH, "reward_kwargs": self.reward_params, "config": self.config, - "live": self.live} + "live": self.live, + "can_short": self.can_short} if self.data_provider: env_info["fee"] = self.data_provider._exchange \ .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 34780f930..bbae7453f 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -133,6 +133,7 @@ class IFreqaiModel(ABC): self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) self.dd.set_pair_dict_info(metadata) self.data_provider = strategy.dp + self.can_short = strategy.can_short if self.live: self.inference_timer('start') From 7727f315070c471ea09bc17ad67da5d2cd06e067 Mon Sep 17 00:00:00 2001 From: Emre Date: Fri, 16 Dec 2022 22:18:49 +0300 Subject: [PATCH 364/421] Add 3 Action RL env --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 125 ++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 freqtrade/freqai/RL/Base3ActionRLEnv.py diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py new file mode 100644 index 000000000..3b5fffc58 --- /dev/null +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -0,0 +1,125 @@ +import logging +from enum import Enum + +from gym import spaces + +from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions + + +logger = logging.getLogger(__name__) + + +class Actions(Enum): + Neutral = 0 + Buy = 1 + Sell = 2 + + +class Base3ActionRLEnv(BaseEnvironment): + """ + Base class for a 3 action environment + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.actions = Actions + + def set_action_space(self): + self.action_space = spaces.Discrete(len(Actions)) + + def step(self, action: int): + """ + Logic for a single step (incrementing one candle in time) + by the agent + :param: action: int = the action type that the agent plans + to take for the current step. 
+ :returns: + observation = current state of environment + step_reward = the reward from `calculate_reward()` + _done = if the agent "died" or if the candles finished + info = dict passed back to openai gym lib + """ + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self._update_unrealized_total_profit() + step_reward = self.calculate_reward(action) + self.total_reward += step_reward + self.tensorboard_log(self.actions._member_names_[action]) + + trade_type = None + if self.is_tradesignal(action): + if action == Actions.Buy.value: + if self._position == Positions.Short: + self._update_total_profit() + self._position = Positions.Long + trade_type = "long" + self._last_trade_tick = self._current_tick + elif action == Actions.Sell.value and self.can_short: + if self._position == Positions.Long: + self._update_total_profit() + self._position = Positions.Short + trade_type = "short" + self._last_trade_tick = self._current_tick + elif action == Actions.Sell.value and not self.can_short: + self._update_total_profit() + self._position = Positions.Neutral + trade_type = "neutral" + self._last_trade_tick = None + else: + print("case not defined") + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if (self._total_profit < self.max_drawdown or + self._total_unrealized_profit < self.max_drawdown): + self._done = True + + self._position_history.append(self._position) + + info = dict( + tick=self._current_tick, + action=action, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value, + trade_duration=self.get_trade_duration(), + current_profit_pct=self.get_unrealized_profit() + ) + + observation = self._get_observation() + + self._update_history(info) + + return observation, step_reward, self._done, info + + def is_tradesignal(self, action: int) -> bool: + """ + Determine if the signal is a trade signal + e.g.: agent wants a Actions.Buy while it is in a Positions.short + """ + return ( + (action == Actions.Buy.value and self._position == Positions.Neutral) + or (action == Actions.Sell.value and self._position == Positions.Long) + or (action == Actions.Sell.value and self._position == Positions.Neutral + and self.can_short) + or (action == Actions.Buy.value and self._position == Positions.Short + and self.can_short) + ) + + def _is_valid(self, action: int) -> bool: + """ + Determine if the signal is valid. 
+ e.g.: agent wants a Actions.Sell while it is in a Positions.Long + """ + if self.can_short: + return action in [Actions.Buy.value, Actions.Sell.value, Actions.Neutral.value] + else: + if action == Actions.Sell.value and self._position != Positions.Long: + return False + return True From a8c9aa01fb3c11330618f26efa822bfe9394124e Mon Sep 17 00:00:00 2001 From: Emre Date: Fri, 16 Dec 2022 22:31:44 +0300 Subject: [PATCH 365/421] Add 3ac test --- tests/freqai/test_freqai_interface.py | 5 +- .../ReinforcementLearner_test_3ac.py | 65 +++++++++++++++++++ 2 files changed, 68 insertions(+), 2 deletions(-) create mode 100644 tests/freqai/test_models/ReinforcementLearner_test_3ac.py diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index f19acb018..2c58d4c0a 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -34,6 +34,7 @@ def is_mac() -> bool: ('CatboostRegressor', False, False, False), ('ReinforcementLearner', False, True, False), ('ReinforcementLearner_multiproc', False, False, False), + ('ReinforcementLearner_test_3ac', False, False, False), ('ReinforcementLearner_test_4ac', False, False, False) ]) def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan, float32): @@ -58,7 +59,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) - if 'test_4ac' in model: + if 'test_3ac' in model or 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") if 'ReinforcementLearner' in model: @@ -68,7 +69,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) - if 'test_4ac' in model: + if 'test_3ac' in model or 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") strategy = get_patched_freqai_strategy(mocker, freqai_conf) diff --git a/tests/freqai/test_models/ReinforcementLearner_test_3ac.py b/tests/freqai/test_models/ReinforcementLearner_test_3ac.py new file mode 100644 index 000000000..c267c76a8 --- /dev/null +++ b/tests/freqai/test_models/ReinforcementLearner_test_3ac.py @@ -0,0 +1,65 @@ +import logging + +import numpy as np + +from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner +from freqtrade.freqai.RL.Base3ActionRLEnv import Actions, Base3ActionRLEnv, Positions + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearner_test_3ac(ReinforcementLearner): + """ + User created Reinforcement Learning Model prediction model. + """ + + class MyRLEnv(Base3ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. + """ + + def calculate_reward(self, action: int) -> float: + + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100. 
+ + # reward agent for entering trades + if (action in (Actions.Buy.value, Actions.Sell.value) + and self._position == Positions.Neutral): + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick # type: ignore + + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and ( + action == Actions.Neutral.value + or (action == Actions.Sell.value and self._position == Positions.Short) + or (action == Actions.Buy.value and self._position == Positions.Long) + ): + return -1 * trade_duration / max_trade_duration + + # close position + if (action == Actions.Buy.value and self._position == Positions.Short) or ( + action == Actions.Sell.value and self._position == Positions.Long + ): + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) + return float(rew * factor) + + return 0. From e604047158a56fe2e0185fec806b7fa1c465d3fa Mon Sep 17 00:00:00 2001 From: Emre Date: Fri, 16 Dec 2022 22:57:55 +0300 Subject: [PATCH 366/421] Enable RL tests on arm mac --- tests/freqai/test_freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 2c58d4c0a..15e656776 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -41,7 +41,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") - if is_mac() and 'Reinforcement' in model: + if is_mac() and not is_arm() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") model_save_ext = 'joblib' From c293401b22fe582463d14edbb7db75582c831212 Mon Sep 17 00:00:00 2001 From: Emre Date: Fri, 16 Dec 2022 23:19:08 +0300 Subject: [PATCH 367/421] Add can_short to freqai base model --- freqtrade/freqai/freqai_interface.py | 1 + 1 file changed, 1 insertion(+) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index bbae7453f..9025f358a 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -104,6 +104,7 @@ class IFreqaiModel(ABC): self.metadata: Dict[str, Any] = self.dd.load_global_metadata_from_disk() self.data_provider: Optional[DataProvider] = None self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) + self.can_short = True # overridden in start() with strategy.can_short record_params(config, self.full_path) From 329a0a3f45aa88c34fba9d605e329708a4a0f6b8 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Sat, 17 Dec 2022 18:43:20 +0100 Subject: [PATCH 368/421] Update docs/freqai.md Co-authored-by: Matthias --- docs/freqai.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai.md b/docs/freqai.md index 80096996d..d13d43f66 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -78,7 +78,7 @@ Forecasting chaotic time-series based systems, such as equity/cryptocurrency mar ### Citing FreqAI -FreqAI is published in the Journal of Open Source Software 
[link](https://joss.theoj.org/papers/10.21105/joss.04864). If you find FreqAI useful in your research, please use the following citation: +FreqAI is [published in the Journal of Open Source Software](https://joss.theoj.org/papers/10.21105/joss.04864). If you find FreqAI useful in your research, please use the following citation: ```bibtex @article{Caulk2022, From bad6fe77d3f388fb1e95b1df9bdf0dd89c6ad373 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 18 Dec 2022 17:21:50 +0100 Subject: [PATCH 369/421] Remove deprecated trade property --- freqtrade/persistence/trade_model.py | 6 ------ tests/test_freqtradebot.py | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py index 19ba48fcd..186a1e584 100644 --- a/freqtrade/persistence/trade_model.py +++ b/freqtrade/persistence/trade_model.py @@ -397,12 +397,6 @@ class LocalTrade(): def close_date_utc(self): return self.close_date.replace(tzinfo=timezone.utc) - @property - def enter_side(self) -> str: - """ DEPRECATED, please use entry_side instead""" - # TODO: Please remove me after 2022.5 - return self.entry_side - @property def entry_side(self) -> str: if self.is_short: diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index faaefcafb..a4431358f 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -2378,7 +2378,7 @@ def test_close_trade( trade.is_short = is_short assert trade - oobj = Order.parse_from_ccxt_object(enter_order, enter_order['symbol'], trade.enter_side) + oobj = Order.parse_from_ccxt_object(enter_order, enter_order['symbol'], trade.entry_side) trade.update_trade(oobj) oobj = Order.parse_from_ccxt_object(exit_order, exit_order['symbol'], trade.exit_side) trade.update_trade(oobj) From a439488b74f2351df7c70f8030af559e786078f2 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 18 Dec 2022 17:42:05 +0100 Subject: [PATCH 370/421] Add initial trade_object documentation --- docs/strategy-customization.md | 36 ++------- docs/trade-object.md | 132 +++++++++++++++++++++++++++++++++ mkdocs.yml | 1 + 3 files changed, 141 insertions(+), 28 deletions(-) create mode 100644 docs/trade-object.md diff --git a/docs/strategy-customization.md b/docs/strategy-customization.md index 0fb35ce89..462f20402 100644 --- a/docs/strategy-customization.md +++ b/docs/strategy-customization.md @@ -989,38 +989,18 @@ from freqtrade.persistence import Trade The following example queries for the current pair and trades from today, however other filters can easily be added. ``` python -if self.config['runmode'].value in ('live', 'dry_run'): - trades = Trade.get_trades([Trade.pair == metadata['pair'], - Trade.open_date > datetime.utcnow() - timedelta(days=1), - Trade.is_open.is_(False), - ]).order_by(Trade.close_date).all() - # Summarize profit for this pair. - curdayprofit = sum(trade.close_profit for trade in trades) +trades = Trade.get_trades_proxy(pair=metadata['pair'], + open_date=datetime.now(timezone.utc) - timedelta(days=1), + is_open=False, + ]).order_by(Trade.close_date).all() +# Summarize profit for this pair. +curdayprofit = sum(trade.close_profit for trade in trades) ``` -Get amount of stake_currency currently invested in Trades: - -``` python -if self.config['runmode'].value in ('live', 'dry_run'): - total_stakes = Trade.total_open_trades_stakes() -``` - -Retrieve performance per pair. -Returns a List of dicts per pair. 
- -``` python -if self.config['runmode'].value in ('live', 'dry_run'): - performance = Trade.get_overall_performance() -``` - -Sample return value: ETH/BTC had 5 trades, with a total profit of 1.5% (ratio of 0.015). - -``` json -{"pair": "ETH/BTC", "profit": 0.015, "count": 5} -``` +For a full list of available methods, please consult the [Trade object](trade-object.md) documentation. !!! Warning - Trade history is not available during backtesting or hyperopt. + Trade history is not available in `populate_*` methods during backtesting or hyperopt, and will result in empty results. ## Prevent trades from happening for a specific pair diff --git a/docs/trade-object.md b/docs/trade-object.md new file mode 100644 index 000000000..ff9571047 --- /dev/null +++ b/docs/trade-object.md @@ -0,0 +1,132 @@ +# Trade Object + +## Trade + +A position freqtrade enters is stored in a Trade object - which is persisted to the database. +It's a core concept of Freqtrade - and something you'll come across in many sections of the documentation, which will most likely point you to this location. + +It will be passed to the strategy in many [strategy callbacks](strategy-callbacks.md). The object passed to the strategy cannot be modified. + +## Available attributes + +The following attributes / properties are available for each individual trade - and can be used with `trade.` (e.g. `trade.pair`). + +| Attribute | DataType | Description | +|------------|-------------|-------------| +| `pair`| string | Pair of this trade +| `is_open`| boolean | Is the trade currently open, or has it been concluded +| `open_rate`| float | Rate this trade was entered at (Avg. entry rate in case of trade-adjustments) +| `close_rate`| float | Close rate - only set when is_open = False +| `stake_amount`| float | Amount in Stake (or Quote) currency. +| `amount`| float | Amount in Asset / Base currency that is currently owned. +| `open_date`| datetime | Timestamp when trade was opened **use `open_date_utc` instead** +| `open_date_utc`| datetime | Timestamp when trade was opened - in UTC +| `close_date`| datetime | Timestamp when trade was closed **use `close_date_utc` instead** +| `close_date_utc`| datetime | Timestamp when trade was closed - in UTC +| `close_profit`| float | Relative profit at the time of trade closure. `0.01` == 1% +| `close_profit_abs`| float | Absolute profit (in stake currency) at the time of trade closure. +| `leverage` | float | Leverage used for this trade - defaults to 1.0 in spot markets. +| `enter_tag`| string | Tag provided on entry via the `enter_tag` column in the dataframe +| `is_short` | boolean | True for short trades, False otherwise +| `orders` | Order[] | List of order objects attached to this trade. +| `date_last_filled_utc` | datetime | Time of the last filled order +| `entry_side` | "buy" / "sell" | Order Side the trade was entered +| `exit_side` | "buy" / "sell" | Order Side that will result in a trade exit / position reduction. +| `trade_direction` | "long" / "short" | Trade direction in text - long or short. +| `nr_of_successful_entries` | int | Number of successful (filled) entry orders +| `nr_of_successful_exits` | int | Number of successful (filled) exit orders + +## Class methods + +The following are class methods - which return generic information, and usually result in an explicit query against the database. +They can be used as `Trade.` - e.g. `open_trades = Trade.get_open_trade_count()` + +!!! 
Warning "Backtesting/hyperopt" + Most methods will work in both backtesting / hyperopt and live/dry modes. + During backtesting, it's limited to usage in [strategy callbacks](strategy-callbacks.md). Usage in `populate_*()` methods is not supported and will result in wrong results. + +### get_trades_proxy + +When your strategy needs some information on existing (open or close) trades - it's best to use `Trade.get_trades_proxy()`. + +Usage: + +``` python +from freqtrade.persistence import Trade +from datetime import timedelta + +# ... +trade_hist = Trade.get_trades_proxy(pair='ETH/USDT', is_open=False, open_date=current_date - timedelta(days=2)) + +``` + +`get_trades_proxy()` supports the following keyword arguments. All arguments are optional - calling `get_trades_proxy()` without arguments will return a list of all trades in the database. + +* `pair` e.g. `pair='ETH/USDT'` +* `is_open` e.g. `is_open=False` +* `open_date` e.g. `open_date=current_date - timedelta(days=2)` +* `close_date` e.g. `close_date=current_date - timedelta(days=5)` + +### get_open_trade_count + +Get the number of currently open trades + +``` python +from freqtrade.persistence import Trade +# ... +open_trades = Trade.get_open_trade_count() +``` + +### get_total_closed_profit + +Retrieve the total profit the bot has generated so far. +Aggregates `close_profit_abs` for all closed trades. + +``` python +from freqtrade.persistence import Trade + +# ... +profit = Trade.get_total_closed_profit() +``` + +### total_open_trades_stakes + +Retrieve the total stake_amount that's currently in trades. + +``` python +from freqtrade.persistence import Trade + +# ... +profit = Trade.total_open_trades_stakes() +``` + +### get_overall_performance + +Retrieve the overall performance - similar to the `/performance` telegram command. + +``` python +from freqtrade.persistence import Trade + +# ... +if self.config['runmode'].value in ('live', 'dry_run'): + performance = Trade.get_overall_performance() +``` + +Sample return value: ETH/BTC had 5 trades, with a total profit of 1.5% (ratio of 0.015). + +``` json +{"pair": "ETH/BTC", "profit": 0.015, "count": 5} +``` + +## Order Object + +An `Order` object represents an order on the exchange (or a simulated order in dry-run mode). +An `Order` object will always be tied to it's corresponding [`Trade`](#trade-object), and only really makes sense in the context of a trade. + +## Available information + +TODO: write me + +| Attribute | DataType | Description | +|------------|-------------|-------------| +TODO: write me diff --git a/mkdocs.yml b/mkdocs.yml index 81f2b7b0b..21fcafbed 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,6 +13,7 @@ nav: - Configuration: configuration.md - Strategy Customization: strategy-customization.md - Strategy Callbacks: strategy-callbacks.md + - Trade Object: trade-object.md - Stoploss: stoploss.md - Plugins: plugins.md - Start the bot: bot-usage.md From eda72ef26ce48b759ec6096a1082d393a451e353 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 18 Dec 2022 19:40:49 +0100 Subject: [PATCH 371/421] Add documentation for Order object --- docs/trade-object.md | 74 +++++++++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 29 deletions(-) diff --git a/docs/trade-object.md b/docs/trade-object.md index ff9571047..7e0db1e3b 100644 --- a/docs/trade-object.md +++ b/docs/trade-object.md @@ -2,39 +2,39 @@ ## Trade -A position freqtrade enters is stored in a Trade object - which is persisted to the database. 
-It's a core concept of Freqtrade - and something you'll come across in many sections of the documentation, which will most likely point you to this location. +A position freqtrade enters is stored in a `Trade` object - which is persisted to the database. +It's a core concept of freqtrade - and something you'll come across in many sections of the documentation, which will most likely point you to this location. -It will be passed to the strategy in many [strategy callbacks](strategy-callbacks.md). The object passed to the strategy cannot be modified. +It will be passed to the strategy in many [strategy callbacks](strategy-callbacks.md). The object passed to the strategy cannot be modified directly. Indirect modifications may occur based on callback results. -## Available attributes +## Trade - Available attributes The following attributes / properties are available for each individual trade - and can be used with `trade.` (e.g. `trade.pair`). | Attribute | DataType | Description | |------------|-------------|-------------| -| `pair`| string | Pair of this trade -| `is_open`| boolean | Is the trade currently open, or has it been concluded -| `open_rate`| float | Rate this trade was entered at (Avg. entry rate in case of trade-adjustments) -| `close_rate`| float | Close rate - only set when is_open = False -| `stake_amount`| float | Amount in Stake (or Quote) currency. -| `amount`| float | Amount in Asset / Base currency that is currently owned. -| `open_date`| datetime | Timestamp when trade was opened **use `open_date_utc` instead** -| `open_date_utc`| datetime | Timestamp when trade was opened - in UTC -| `close_date`| datetime | Timestamp when trade was closed **use `close_date_utc` instead** -| `close_date_utc`| datetime | Timestamp when trade was closed - in UTC -| `close_profit`| float | Relative profit at the time of trade closure. `0.01` == 1% -| `close_profit_abs`| float | Absolute profit (in stake currency) at the time of trade closure. -| `leverage` | float | Leverage used for this trade - defaults to 1.0 in spot markets. -| `enter_tag`| string | Tag provided on entry via the `enter_tag` column in the dataframe -| `is_short` | boolean | True for short trades, False otherwise -| `orders` | Order[] | List of order objects attached to this trade. -| `date_last_filled_utc` | datetime | Time of the last filled order -| `entry_side` | "buy" / "sell" | Order Side the trade was entered -| `exit_side` | "buy" / "sell" | Order Side that will result in a trade exit / position reduction. -| `trade_direction` | "long" / "short" | Trade direction in text - long or short. -| `nr_of_successful_entries` | int | Number of successful (filled) entry orders -| `nr_of_successful_exits` | int | Number of successful (filled) exit orders +`pair`| string | Pair of this trade +`is_open`| boolean | Is the trade currently open, or has it been concluded +`open_rate`| float | Rate this trade was entered at (Avg. entry rate in case of trade-adjustments) +`close_rate`| float | Close rate - only set when is_open = False +`stake_amount`| float | Amount in Stake (or Quote) currency. +`amount`| float | Amount in Asset / Base currency that is currently owned. 
+`open_date`| datetime | Timestamp when trade was opened **use `open_date_utc` instead** +`open_date_utc`| datetime | Timestamp when trade was opened - in UTC +`close_date`| datetime | Timestamp when trade was closed **use `close_date_utc` instead** +`close_date_utc`| datetime | Timestamp when trade was closed - in UTC +`close_profit`| float | Relative profit at the time of trade closure. `0.01` == 1% +`close_profit_abs`| float | Absolute profit (in stake currency) at the time of trade closure. +`leverage` | float | Leverage used for this trade - defaults to 1.0 in spot markets. +`enter_tag`| string | Tag provided on entry via the `enter_tag` column in the dataframe +`is_short` | boolean | True for short trades, False otherwise +`orders` | Order[] | List of order objects attached to this trade (includes both filled and cancelled orders) +`date_last_filled_utc` | datetime | Time of the last filled order +`entry_side` | "buy" / "sell" | Order Side the trade was entered +`exit_side` | "buy" / "sell" | Order Side that will result in a trade exit / position reduction. +`trade_direction` | "long" / "short" | Trade direction in text - long or short. +`nr_of_successful_entries` | int | Number of successful (filled) entry orders +`nr_of_successful_exits` | int | Number of successful (filled) exit orders ## Class methods @@ -123,10 +123,26 @@ Sample return value: ETH/BTC had 5 trades, with a total profit of 1.5% (ratio of An `Order` object represents an order on the exchange (or a simulated order in dry-run mode). An `Order` object will always be tied to it's corresponding [`Trade`](#trade-object), and only really makes sense in the context of a trade. -## Available information +### Order - Available attributes -TODO: write me +an Order object is typically attached to a trade. +Most properties here can be None as they are dependant on the exchange response. | Attribute | DataType | Description | |------------|-------------|-------------| -TODO: write me +`trade` | Trade | Trade object this order is attached to +`ft_pair` | string | Pair this order is for +`ft_is_open` | boolean | is the order filled? +`order_type` | string | Order type as defined on the exchange - usually market, limit or stoploss +`status` | string | Status as defined by ccxt. 
Usually open, closed, expired or canceled +`side` | string | Buy or Sell +`price` | float | Price the order was placed at +`average` | float | Average price the order filled at +`amount` | float | Amount in base currency +`filled` | float | Filled amount (in base currency) +`remaining` | float | Remaining amount +`cost` | float | Cost of the order - usually average * filled +`order_date` | datetime | Order creation date **use `order_date_utc` instead** +`order_date_utc` | datetime | Order creation date (in UTC) +`order_fill_date` | datetime | Order fill date **use `order_fill_utc` instead** +`order_fill_date_utc` | datetime | Order fill date From 1f4cc145c48adf470af21b3022ab0b9c8ae88444 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 18 Dec 2022 20:02:38 +0100 Subject: [PATCH 372/421] Move trade docs to advanced section --- mkdocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index 21fcafbed..c44e4640e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,7 +13,6 @@ nav: - Configuration: configuration.md - Strategy Customization: strategy-customization.md - Strategy Callbacks: strategy-callbacks.md - - Trade Object: trade-object.md - Stoploss: stoploss.md - Plugins: plugins.md - Start the bot: bot-usage.md @@ -42,6 +41,7 @@ nav: - Backtest analysis: advanced-backtesting.md - Advanced Topics: - Advanced Post-installation Tasks: advanced-setup.md + - Trade Object: trade-object.md - Advanced Strategy: strategy-advanced.md - Advanced Hyperopt: advanced-hyperopt.md - Producer/Consumer mode: producer-consumer.md From bb33b96ba7bcf90f442251c9e7e4d44392cec9a2 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Sun, 18 Dec 2022 22:28:12 +0100 Subject: [PATCH 373/421] init cache on first iteration, init checks, limit length and charmap to info replace if invalid, move filter logic --- docs/includes/pairlists.md | 2 +- freqtrade/plugins/pairlist/RemotePairList.py | 87 +++++++++++++------- 2 files changed, 60 insertions(+), 29 deletions(-) diff --git a/docs/includes/pairlists.md b/docs/includes/pairlists.md index 3a6ab7a3c..0bff9b29b 100644 --- a/docs/includes/pairlists.md +++ b/docs/includes/pairlists.md @@ -202,7 +202,7 @@ The user is responsible for providing a server or local file that returns a JSON { "pairs": ["XRP/USDT", "ETH/USDT", "LTC/USDT"], "refresh_period": 1800, - "info": "Pairlist updated on 2022-12-12 at 12:12" + "info": "Pairlist updated on 2022-12-12 at 12:12" // Maximum Length: 256 Characters, Charset: Alphanumeric + "+-.,%:" } ``` diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index e46ac0419..a0e140b42 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -6,7 +6,7 @@ Provides pair list fetched from a remote source import json import logging from pathlib import Path -from typing import Any, Dict, List, Tuple +from typing import Any, Dict, List, Optional, Tuple import requests from cachetools import TTLCache @@ -39,12 +39,13 @@ class RemotePairList(IPairList): 'for "pairlist.config.pairlist_url"') self._number_pairs = self._pairlistconfig['number_assets'] - self._refresh_period = self._pairlistconfig.get('refresh_period', 1800) + self._refresh_period: int = self._pairlistconfig.get('refresh_period', 1800) self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True) - self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) + self._pair_cache: Optional[TTLCache] = None 
self._pairlist_url = self._pairlistconfig.get('pairlist_url', '') self._read_timeout = self._pairlistconfig.get('read_timeout', 60) self._bearer_token = self._pairlistconfig.get('bearer_token', '') + self._init_done = False self._last_pairlist: List[Any] = list() @property @@ -62,6 +63,15 @@ class RemotePairList(IPairList): """ return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." + def return_last_pairlist(self) -> List[str]: + if self._keep_pairlist_on_failure: + pairlist = self._last_pairlist + self.log_once('Keeping last fetched pairlist', logger.info) + else: + pairlist = [] + + return pairlist + def fetch_pairlist(self) -> Tuple[List[str], float, str]: headers = { @@ -81,23 +91,35 @@ class RemotePairList(IPairList): if "application/json" in str(content_type): jsonparse = response.json() - pairlist = jsonparse['pairs'] - info = jsonparse.get('info', '') - else: - raise OperationalException('RemotePairList is not of type JSON abort ') + pairlist = jsonparse.get('pairs', []) + remote_info = jsonparse.get('info', '')[:256].strip() + remote_refresh_period = jsonparse.get('refresh_period', self._refresh_period) - self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) - self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + info = "".join(char if char.isalnum() or + char in " +-.,%:" else "-" for char in remote_info) + + if not self._init_done and self._refresh_period < remote_refresh_period: + self.log_once(f'Refresh Period has been increased from {self._refresh_period}' + f' to {remote_refresh_period} from Remote.', logger.info) + + self._refresh_period = remote_refresh_period + self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + + self._init_done = True + else: + if self._init_done: + self.log_once(f'Error: RemotePairList is not of type JSON: ' + f' {self._pairlist_url}', logger.info) + pairlist = self.return_last_pairlist() + + else: + raise OperationalException('RemotePairList is not of type JSON abort ') except requests.exceptions.RequestException: self.log_once(f'Was not able to fetch pairlist from:' f' {self._pairlist_url}', logger.info) - if self._keep_pairlist_on_failure: - pairlist = self._last_pairlist - self.log_once('Keeping last fetched pairlist', logger.info) - else: - pairlist = [] + pairlist = self.return_last_pairlist() time_elapsed = 0 @@ -110,12 +132,17 @@ class RemotePairList(IPairList): :return: List of pairs """ - pairlist = self._pair_cache.get('pairlist') + if self._init_done and self._pair_cache: + pairlist = self._pair_cache.get('pairlist') + else: + pairlist = [] + time_elapsed = 0.0 if pairlist: # Item found - no refresh necessary return pairlist.copy() + self._init_done = True else: if self._pairlist_url.startswith("file:///"): filename = self._pairlist_url.split("file:///", 1)[1] @@ -127,17 +154,25 @@ class RemotePairList(IPairList): jsonparse = json.load(json_file) pairlist = jsonparse['pairs'] info = jsonparse.get('info', '') - self._refresh_period = jsonparse.get('refresh_period', self._refresh_period) - self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + if not self._init_done: + self._refresh_period = jsonparse.get('refresh_period', + self._refresh_period) + self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + self._init_done = True else: raise ValueError(f"{self._pairlist_url} does not exist.") else: # Fetch Pairlist from Remote URL pairlist, time_elapsed, info = self.fetch_pairlist() - pairlist = self.filter_pairlist(pairlist, tickers) - 
self._pair_cache['pairlist'] = pairlist.copy() + self.log_once(f"Fetched pairs: {pairlist}", logger.debug) + + pairlist = self._whitelist_for_active_markets(pairlist) + pairlist = pairlist[:self._number_pairs] + + if self._pair_cache: + self._pair_cache['pairlist'] = pairlist.copy() if time_elapsed != 0.0: self.log_once(f'{info} Fetched in {time_elapsed} seconds.', logger.info) @@ -145,6 +180,7 @@ class RemotePairList(IPairList): self.log_once(f'{info} Fetched Pairlist.', logger.info) self._last_pairlist = list(pairlist) + return pairlist def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]: @@ -155,12 +191,7 @@ class RemotePairList(IPairList): :param tickers: Tickers (from exchange.get_tickers). May be cached. :return: new whitelist """ - - # Validate whitelist to only have active market pairs - pairlist = self._whitelist_for_active_markets(pairlist) - pairlist = self.verify_blacklist(pairlist, logger.info) - # Limit pairlist to the requested number of pairs - pairlist = pairlist[:self._number_pairs] - self.log_once(f"Searching {self._number_pairs} pairs: {pairlist}", logger.info) - - return pairlist + rpl_pairlist = self.gen_pairlist(tickers) + merged_list = pairlist + rpl_pairlist + merged_list = sorted(set(merged_list), key=merged_list.index) + return merged_list From 6380c3d46205bdb3b29c5bff213cb7b113b93a79 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Sun, 18 Dec 2022 23:37:18 +0100 Subject: [PATCH 374/421] reduce duplicate code, fix cache check --- freqtrade/plugins/pairlist/RemotePairList.py | 54 ++++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index a0e140b42..205ee5742 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -63,6 +63,29 @@ class RemotePairList(IPairList): """ return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." 
+ def process_json(self, jsonparse) -> Tuple[List[str], str]: + + pairlist = jsonparse.get('pairs', []) + remote_info = jsonparse.get('info', '')[:256].strip() + remote_refresh_period = jsonparse.get('refresh_period', self._refresh_period) + + info = "".join(char if char.isalnum() or + char in " +-.,%:" else "-" for char in remote_info) + + if not self._init_done: + if self._refresh_period < remote_refresh_period: + self.log_once(f'Refresh Period has been increased from {self._refresh_period}' + f' to {remote_refresh_period} from Remote.', logger.info) + + self._refresh_period = remote_refresh_period + self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + else: + self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + + self._init_done = True + + return pairlist, info + def return_last_pairlist(self) -> List[str]: if self._keep_pairlist_on_failure: pairlist = self._last_pairlist @@ -91,27 +114,12 @@ class RemotePairList(IPairList): if "application/json" in str(content_type): jsonparse = response.json() - pairlist = jsonparse.get('pairs', []) - remote_info = jsonparse.get('info', '')[:256].strip() - remote_refresh_period = jsonparse.get('refresh_period', self._refresh_period) - - info = "".join(char if char.isalnum() or - char in " +-.,%:" else "-" for char in remote_info) - - if not self._init_done and self._refresh_period < remote_refresh_period: - self.log_once(f'Refresh Period has been increased from {self._refresh_period}' - f' to {remote_refresh_period} from Remote.', logger.info) - - self._refresh_period = remote_refresh_period - self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) - - self._init_done = True + pairlist, info = self.process_json(jsonparse) else: if self._init_done: self.log_once(f'Error: RemotePairList is not of type JSON: ' f' {self._pairlist_url}', logger.info) pairlist = self.return_last_pairlist() - else: raise OperationalException('RemotePairList is not of type JSON abort ') @@ -132,7 +140,7 @@ class RemotePairList(IPairList): :return: List of pairs """ - if self._init_done and self._pair_cache: + if self._init_done and self._pair_cache is not None: pairlist = self._pair_cache.get('pairlist') else: pairlist = [] @@ -142,7 +150,6 @@ class RemotePairList(IPairList): if pairlist: # Item found - no refresh necessary return pairlist.copy() - self._init_done = True else: if self._pairlist_url.startswith("file:///"): filename = self._pairlist_url.split("file:///", 1)[1] @@ -152,14 +159,7 @@ class RemotePairList(IPairList): with open(filename) as json_file: # Load the JSON data into a dictionary jsonparse = json.load(json_file) - pairlist = jsonparse['pairs'] - info = jsonparse.get('info', '') - - if not self._init_done: - self._refresh_period = jsonparse.get('refresh_period', - self._refresh_period) - self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) - self._init_done = True + pairlist, info = self.process_json(jsonparse) else: raise ValueError(f"{self._pairlist_url} does not exist.") else: @@ -171,7 +171,7 @@ class RemotePairList(IPairList): pairlist = self._whitelist_for_active_markets(pairlist) pairlist = pairlist[:self._number_pairs] - if self._pair_cache: + if self._pair_cache is not None: self._pair_cache['pairlist'] = pairlist.copy() if time_elapsed != 0.0: From b61fc161bfde822c1569696cfa2d5e5d68a198b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 03:00:43 +0000 Subject: [PATCH 375/421] Bump isort from 5.10.1 to 5.11.3 Bumps 
[isort](https://github.com/pycqa/isort) from 5.10.1 to 5.11.3. - [Release notes](https://github.com/pycqa/isort/releases) - [Changelog](https://github.com/PyCQA/isort/blob/main/CHANGELOG.md) - [Commits](https://github.com/pycqa/isort/compare/5.10.1...5.11.3) --- updated-dependencies: - dependency-name: isort dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 9de41b273..03129bf07 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -16,7 +16,7 @@ pytest-asyncio==0.20.3 pytest-cov==4.0.0 pytest-mock==3.10.0 pytest-random-order==1.1.0 -isort==5.10.1 +isort==5.11.3 # For datetime mocking time-machine==2.8.2 # fastapi testing From d86885c7f95c9d7eb1ef187cfdec012fa850aba9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 03:00:46 +0000 Subject: [PATCH 376/421] Bump blosc from 1.10.6 to 1.11.1 Bumps [blosc](https://github.com/blosc/python-blosc) from 1.10.6 to 1.11.1. - [Release notes](https://github.com/blosc/python-blosc/releases) - [Changelog](https://github.com/Blosc/python-blosc/blob/main/RELEASE_NOTES.rst) - [Commits](https://github.com/blosc/python-blosc/compare/v1.10.6...v1.11.1) --- updated-dependencies: - dependency-name: blosc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 37f1d31e1..eae0cc7da 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ pycoingecko==3.1.0 jinja2==3.1.2 tables==3.7.0 blosc==1.10.6; platform_machine == 'arm64' -blosc==1.11.0; platform_machine != 'arm64' +blosc==1.11.1; platform_machine != 'arm64' joblib==1.2.0 pyarrow==10.0.1; platform_machine != 'armv7l' From 06225b9501b178c67c9cea5481d32298ebf07c4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 03:00:54 +0000 Subject: [PATCH 377/421] Bump types-python-dateutil from 2.8.19.4 to 2.8.19.5 Bumps [types-python-dateutil](https://github.com/python/typeshed) from 2.8.19.4 to 2.8.19.5. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-python-dateutil dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 9de41b273..e750dd577 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -30,4 +30,4 @@ types-cachetools==5.2.1 types-filelock==3.2.7 types-requests==2.28.11.5 types-tabulate==0.9.0.0 -types-python-dateutil==2.8.19.4 +types-python-dateutil==2.8.19.5 From 7216d140ded9aa54d2655f9d32bc9927dfb9acd6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 03:01:09 +0000 Subject: [PATCH 378/421] Bump torch from 1.13.0 to 1.13.1 Bumps [torch](https://github.com/pytorch/pytorch) from 1.13.0 to 1.13.1. 
- [Release notes](https://github.com/pytorch/pytorch/releases) - [Changelog](https://github.com/pytorch/pytorch/blob/master/RELEASE.md) - [Commits](https://github.com/pytorch/pytorch/compare/v1.13.0...v1.13.1) --- updated-dependencies: - dependency-name: torch dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index 67bd66102..db8d8d169 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -2,7 +2,7 @@ -r requirements-freqai.txt # Required for freqai-rl -torch==1.13.0 +torch==1.13.1 stable-baselines3==1.6.2 sb3-contrib==1.6.2 # Gym is forced to this version by stable-baselines3. From fa87e080715bf69ce6ac8301e66262aaf8b7425e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 03:01:26 +0000 Subject: [PATCH 379/421] Bump ccxt from 2.2.92 to 2.4.27 Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.2.92 to 2.4.27. - [Release notes](https://github.com/ccxt/ccxt/releases) - [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg) - [Commits](https://github.com/ccxt/ccxt/compare/2.2.92...2.4.27) --- updated-dependencies: - dependency-name: ccxt dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 37f1d31e1..bc3b8ad77 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.23.5 pandas==1.5.2 pandas-ta==0.3.14b -ccxt==2.2.92 +ccxt==2.4.27 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.4; platform_machine != 'armv7l' From 0c8d657d9202345e3afe17d1bdb3c994337de2b0 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 19 Dec 2022 06:27:38 +0100 Subject: [PATCH 380/421] update types-dateutil precommit --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ccf9d5098..a7e60ce90 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,7 +17,7 @@ repos: - types-filelock==3.2.7 - types-requests==2.28.11.5 - types-tabulate==0.9.0.0 - - types-python-dateutil==2.8.19.4 + - types-python-dateutil==2.8.19.5 # stages: [push] - repo: https://github.com/pycqa/isort From 86b30d2d6648971cd8a60e374bb932ac7ffabbd3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 19 Dec 2022 07:01:22 +0100 Subject: [PATCH 381/421] Improve emc test resiliancy --- tests/rpc/test_rpc_emc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/rpc/test_rpc_emc.py b/tests/rpc/test_rpc_emc.py index e1537ec9e..26512e30b 100644 --- a/tests/rpc/test_rpc_emc.py +++ b/tests/rpc/test_rpc_emc.py @@ -267,7 +267,7 @@ async def test_emc_create_connection_error(default_conf, caplog, mocker): emc = ExternalMessageConsumer(default_conf, dp) try: - await asyncio.sleep(0.01) + await asyncio.sleep(0.05) assert log_has("Unexpected error has occurred:", caplog) finally: emc.shutdown() From a276ef4b061183e5209497bee9b7cb83a87d8126 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 19 Dec 2022 11:49:31 +0100 Subject: [PATCH 382/421] ensure long only RL is tested --- tests/freqai/test_freqai_interface.py | 26 
+++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 15e656776..af104f3d2 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -27,17 +27,19 @@ def is_mac() -> bool: return "Darwin" in machine -@pytest.mark.parametrize('model, pca, dbscan, float32', [ - ('LightGBMRegressor', True, False, True), - ('XGBoostRegressor', False, True, False), - ('XGBoostRFRegressor', False, False, False), - ('CatboostRegressor', False, False, False), - ('ReinforcementLearner', False, True, False), - ('ReinforcementLearner_multiproc', False, False, False), - ('ReinforcementLearner_test_3ac', False, False, False), - ('ReinforcementLearner_test_4ac', False, False, False) +@pytest.mark.parametrize('model, pca, dbscan, float32, can_short', [ + ('LightGBMRegressor', True, False, True, True), + ('XGBoostRegressor', False, True, False, True), + ('XGBoostRFRegressor', False, False, False, True), + ('CatboostRegressor', False, False, False, True), + ('ReinforcementLearner', False, True, False, True), + ('ReinforcementLearner_multiproc', False, False, False, True), + ('ReinforcementLearner_test_3ac', False, False, False, False), + ('ReinforcementLearner_test_3ac', False, False, False, True), + ('ReinforcementLearner_test_4ac', False, False, False, True) ]) -def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan, float32): +def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, + dbscan, float32, can_short): if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") @@ -59,9 +61,6 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) - if 'test_3ac' in model or 'test_4ac' in model: - freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") - if 'ReinforcementLearner' in model: model_save_ext = 'zip' freqai_conf = make_rl_config(freqai_conf) @@ -78,6 +77,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, strategy.freqai_info = freqai_conf.get("freqai", {}) freqai = strategy.freqai freqai.live = True + freqai.can_short = can_short freqai.dk = FreqaiDataKitchen(freqai_conf) freqai.dk.set_paths('ADA/BTC', 10000) timerange = TimeRange.parse_timerange("20180110-20180130") From 5405d8fa6fd425a63c4287574f8a168131772967 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 19 Dec 2022 12:14:53 +0100 Subject: [PATCH 383/421] add discussion and tips for Base3ActionRLEnvironment --- docs/freqai-reinforcement-learning.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index f3d6c97f8..c2bcb75f8 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -275,12 +275,12 @@ FreqAI also provides a built in episodic summary logger called `self.tensorboard ### Choosing a base environment -FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. 
Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: +FreqAI provides three base environments, `Base3ActionRLEnvironment`, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionEnvironment` is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: * the actions available in the `calculate_reward` * the actions consumed by the user strategy -Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. +All of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. !!! Note - FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). + Only the `Base3ActionRLEnv` can do long-only training/trading (set the user strategy attribute `can_short = False`). 
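For illustration, a minimal sketch of such a `calculate_reward()` override is shown below. The import path, the `Actions`/`Positions` enum members, and the `_is_valid()` / `get_unrealized_profit()` helpers are assumptions based on the environment names referenced in this section, not a verified API of this exact revision.

```python
# Hypothetical user prediction-model file: override only the reward shaping.
# NOTE: import path, enum members (Long_exit, Neutral) and the helpers
# _is_valid() / get_unrealized_profit() are assumed from this doc section.
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyRLEnv(Base5ActionRLEnv):
    """Sketch of a custom environment that only customizes the reward."""

    def calculate_reward(self, action: int) -> float:
        # Penalize actions that are not valid in the current position
        # (e.g. trying to exit a long while flat).
        if not self._is_valid(action):
            return -2.0

        pnl = self.get_unrealized_profit()

        # Reward closing a long position in proportion to its profit ...
        if action == Actions.Long_exit.value and self._position == Positions.Long:
            return float(pnl * 100)

        # ... and discourage the agent from idling inside an open position.
        if action == Actions.Neutral.value and self._position != Positions.Neutral:
            return -1.0

        return 0.0
```

The same pattern applies to the other base environments; only the set of available actions in `calculate_reward` changes.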
From 5b9e3af276b3a7ebc1f9a4df0742f8a0eafa4332 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 19 Dec 2022 12:22:15 +0100 Subject: [PATCH 384/421] improve wording --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index c2bcb75f8..22772c2ec 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -275,7 +275,7 @@ FreqAI also provides a built in episodic summary logger called `self.tensorboard ### Choosing a base environment -FreqAI provides three base environments, `Base3ActionRLEnvironment`, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionEnvironment` is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: +FreqAI provides three base environments, `Base3ActionRLEnvironment`, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionEnvironment` is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Finally, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. 
The main changes stemming from the environment selection include: * the actions available in the `calculate_reward` * the actions consumed by the user strategy From 4bad2b5c042fc754b7fd911ca9e5aff7a530a912 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 19 Dec 2022 13:27:07 +0100 Subject: [PATCH 385/421] Apply suggestions from code review Co-authored-by: Emre --- requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index eae0cc7da..170389247 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,8 +20,7 @@ tabulate==0.9.0 pycoingecko==3.1.0 jinja2==3.1.2 tables==3.7.0 -blosc==1.10.6; platform_machine == 'arm64' -blosc==1.11.1; platform_machine != 'arm64' +blosc==1.11.1 joblib==1.2.0 pyarrow==10.0.1; platform_machine != 'armv7l' From 43f5a16006805d763b59c3cf1f8ff32aee47df66 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Mon, 19 Dec 2022 15:36:28 +0100 Subject: [PATCH 386/421] parse exception handling, remove info, cache change --- docs/includes/pairlists.md | 3 +- freqtrade/plugins/pairlist/RemotePairList.py | 68 +++++++++++--------- tests/plugins/test_remotepairlist.py | 6 +- 3 files changed, 41 insertions(+), 36 deletions(-) diff --git a/docs/includes/pairlists.md b/docs/includes/pairlists.md index 0bff9b29b..5fda038bd 100644 --- a/docs/includes/pairlists.md +++ b/docs/includes/pairlists.md @@ -202,11 +202,10 @@ The user is responsible for providing a server or local file that returns a JSON { "pairs": ["XRP/USDT", "ETH/USDT", "LTC/USDT"], "refresh_period": 1800, - "info": "Pairlist updated on 2022-12-12 at 12:12" // Maximum Length: 256 Characters, Charset: Alphanumeric + "+-.,%:" } ``` -The `pairs` property should contain a list of strings with the trading pairs to be used by the bot. The `refresh_period` property is optional and specifies the number of seconds that the pairlist should be cached before being refreshed. The `info` property is also optional and can be used to provide any additional information about the pairlist. +The `pairs` property should contain a list of strings with the trading pairs to be used by the bot. The `refresh_period` property is optional and specifies the number of seconds that the pairlist should be cached before being refreshed. The optional `keep_pairlist_on_failure` specifies whether the previous received pairlist should be used if the remote server is not reachable or returns an error. The default value is true. 
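For reference, a bot-side pairlist configuration using these options might look like the following sketch. The values are purely illustrative; the keys mirror the options read by `RemotePairList` in the diff below (`pairlist_url`, `number_assets`, `refresh_period`, `keep_pairlist_on_failure`, `read_timeout`, `bearer_token`).

```json
{
    "pairlists": [
        {
            "method": "RemotePairList",
            "pairlist_url": "https://example.com/pairlist.json",
            "number_assets": 10,
            "refresh_period": 1800,
            "keep_pairlist_on_failure": true,
            "read_timeout": 60,
            "bearer_token": ""
        }
    ]
}
```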
diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 205ee5742..25530457a 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -6,7 +6,7 @@ Provides pair list fetched from a remote source import json import logging from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Tuple import requests from cachetools import TTLCache @@ -41,7 +41,7 @@ class RemotePairList(IPairList): self._number_pairs = self._pairlistconfig['number_assets'] self._refresh_period: int = self._pairlistconfig.get('refresh_period', 1800) self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True) - self._pair_cache: Optional[TTLCache] = None + self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) self._pairlist_url = self._pairlistconfig.get('pairlist_url', '') self._read_timeout = self._pairlistconfig.get('read_timeout', 60) self._bearer_token = self._pairlistconfig.get('bearer_token', '') @@ -63,28 +63,20 @@ class RemotePairList(IPairList): """ return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." - def process_json(self, jsonparse) -> Tuple[List[str], str]: + def process_json(self, jsonparse) -> List[str]: pairlist = jsonparse.get('pairs', []) - remote_info = jsonparse.get('info', '')[:256].strip() - remote_refresh_period = jsonparse.get('refresh_period', self._refresh_period) + remote_refresh_period = int(jsonparse.get('refresh_period', self._refresh_period)) - info = "".join(char if char.isalnum() or - char in " +-.,%:" else "-" for char in remote_info) - - if not self._init_done: - if self._refresh_period < remote_refresh_period: - self.log_once(f'Refresh Period has been increased from {self._refresh_period}' - f' to {remote_refresh_period} from Remote.', logger.info) - - self._refresh_period = remote_refresh_period - self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) - else: - self._pair_cache = TTLCache(maxsize=1, ttl=self._refresh_period) + if self._refresh_period < remote_refresh_period: + self.log_once(f'Refresh Period has been increased from {self._refresh_period}' + f' to minimum allowed: {remote_refresh_period} from Remote.', logger.info) + self._refresh_period = remote_refresh_period + self._pair_cache = TTLCache(maxsize=1, ttl=remote_refresh_period) self._init_done = True - return pairlist, info + return pairlist def return_last_pairlist(self) -> List[str]: if self._keep_pairlist_on_failure: @@ -95,7 +87,7 @@ class RemotePairList(IPairList): return pairlist - def fetch_pairlist(self) -> Tuple[List[str], float, str]: + def fetch_pairlist(self) -> Tuple[List[str], float]: headers = { 'User-Agent': 'Freqtrade/' + __version__ + ' Remotepairlist' @@ -104,8 +96,6 @@ class RemotePairList(IPairList): if self._bearer_token: headers['Authorization'] = f'Bearer {self._bearer_token}' - info = "Pairlist" - try: response = requests.get(self._pairlist_url, headers=headers, timeout=self._read_timeout) @@ -114,7 +104,17 @@ class RemotePairList(IPairList): if "application/json" in str(content_type): jsonparse = response.json() - pairlist, info = self.process_json(jsonparse) + + try: + pairlist = self.process_json(jsonparse) + except Exception as e: + + if self._init_done: + pairlist = self.return_last_pairlist() + logger.warning(f'Error while processing JSON data: {type(e)}') + else: + raise OperationalException(f'Error while processing JSON data: 
{type(e)}') + else: if self._init_done: self.log_once(f'Error: RemotePairList is not of type JSON: ' @@ -131,7 +131,7 @@ class RemotePairList(IPairList): time_elapsed = 0 - return pairlist, time_elapsed, info + return pairlist, time_elapsed def gen_pairlist(self, tickers: Tickers) -> List[str]: """ @@ -140,7 +140,7 @@ class RemotePairList(IPairList): :return: List of pairs """ - if self._init_done and self._pair_cache is not None: + if self._init_done: pairlist = self._pair_cache.get('pairlist') else: pairlist = [] @@ -159,25 +159,33 @@ class RemotePairList(IPairList): with open(filename) as json_file: # Load the JSON data into a dictionary jsonparse = json.load(json_file) - pairlist, info = self.process_json(jsonparse) + + try: + pairlist = self.process_json(jsonparse) + except Exception as e: + if self._init_done: + pairlist = self.return_last_pairlist() + logger.warning(f'Error while processing JSON data: {type(e)}') + else: + raise OperationalException('Error while processing' + f'JSON data: {type(e)}') else: raise ValueError(f"{self._pairlist_url} does not exist.") else: # Fetch Pairlist from Remote URL - pairlist, time_elapsed, info = self.fetch_pairlist() + pairlist, time_elapsed = self.fetch_pairlist() self.log_once(f"Fetched pairs: {pairlist}", logger.debug) pairlist = self._whitelist_for_active_markets(pairlist) pairlist = pairlist[:self._number_pairs] - if self._pair_cache is not None: - self._pair_cache['pairlist'] = pairlist.copy() + self._pair_cache['pairlist'] = pairlist.copy() if time_elapsed != 0.0: - self.log_once(f'{info} Fetched in {time_elapsed} seconds.', logger.info) + self.log_once(f'Pairlist Fetched in {time_elapsed} seconds.', logger.info) else: - self.log_once(f'{info} Fetched Pairlist.', logger.info) + self.log_once('Fetched Pairlist.', logger.info) self._last_pairlist = list(pairlist) diff --git a/tests/plugins/test_remotepairlist.py b/tests/plugins/test_remotepairlist.py index fc91d3f06..b7a484c92 100644 --- a/tests/plugins/test_remotepairlist.py +++ b/tests/plugins/test_remotepairlist.py @@ -107,7 +107,7 @@ def test_fetch_pairlist_timeout_keep_last_pairlist(mocker, rpl_config, caplog): remote_pairlist._last_pairlist = ["BTC/USDT", "ETH/USDT", "LTC/USDT"] - pairs, time_elapsed, info = remote_pairlist.fetch_pairlist() + pairs, time_elapsed = remote_pairlist.fetch_pairlist() assert log_has(f"Was not able to fetch pairlist from: {remote_pairlist._pairlist_url}", caplog) assert log_has("Keeping last fetched pairlist", caplog) assert pairs == ["BTC/USDT", "ETH/USDT", "LTC/USDT"] @@ -163,7 +163,6 @@ def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): mock_response.json.return_value = { "pairs": ["ETH/USDT", "XRP/USDT", "LTC/USDT", "EOS/USDT"], - "info": "Mock pairlist response", "refresh_period": 60 } @@ -179,9 +178,8 @@ def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): pairlistmanager = PairListManager(exchange, rpl_config) remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, rpl_config['pairlists'][0], 0) - pairs, time_elapsed, info = remote_pairlist.fetch_pairlist() + pairs, time_elapsed = remote_pairlist.fetch_pairlist() assert pairs == ["ETH/USDT", "XRP/USDT", "LTC/USDT", "EOS/USDT"] assert time_elapsed == 0.4 - assert info == "Mock pairlist response" assert remote_pairlist._refresh_period == 60 From ebf60d85da374a24601c8cff3ecd49fc5931fe02 Mon Sep 17 00:00:00 2001 From: Bloodhunter4rc Date: Mon, 19 Dec 2022 16:25:22 +0100 Subject: [PATCH 387/421] self._init_done placed wrong. 
fixed --- freqtrade/plugins/pairlist/RemotePairList.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 25530457a..0746f7e6f 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -74,7 +74,8 @@ class RemotePairList(IPairList): self._refresh_period = remote_refresh_period self._pair_cache = TTLCache(maxsize=1, ttl=remote_refresh_period) - self._init_done = True + + self._init_done = True return pairlist From a119fbd895f6a6ef5c33abd128b8d96f0321c520 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 19 Dec 2022 18:19:55 +0100 Subject: [PATCH 388/421] Small error-message finetuning --- freqtrade/plugins/pairlist/RemotePairList.py | 2 +- tests/plugins/test_remotepairlist.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 0746f7e6f..b54be1fa7 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -122,7 +122,7 @@ class RemotePairList(IPairList): f' {self._pairlist_url}', logger.info) pairlist = self.return_last_pairlist() else: - raise OperationalException('RemotePairList is not of type JSON abort ') + raise OperationalException('RemotePairList is not of type JSON, abort.') except requests.exceptions.RequestException: self.log_once(f'Was not able to fetch pairlist from:' diff --git a/tests/plugins/test_remotepairlist.py b/tests/plugins/test_remotepairlist.py index b7a484c92..ac1d1f5ed 100644 --- a/tests/plugins/test_remotepairlist.py +++ b/tests/plugins/test_remotepairlist.py @@ -81,7 +81,7 @@ def test_fetch_pairlist_mock_response_html(mocker, rpl_config): remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, rpl_config['pairlists'][0], 0) - with pytest.raises(OperationalException, match='RemotePairList is not of type JSON abort'): + with pytest.raises(OperationalException, match='RemotePairList is not of type JSON, abort.'): remote_pairlist.fetch_pairlist() From 5dbd5c235af3fae7b06a6fd1accac099f9fe007a Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 20 Dec 2022 07:21:52 +0100 Subject: [PATCH 389/421] Add endpoint for freqAI models --- freqtrade/rpc/api_server/api_schemas.py | 5 +++++ freqtrade/rpc/api_server/api_v1.py | 26 ++++++++++++++++++------- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/freqtrade/rpc/api_server/api_schemas.py b/freqtrade/rpc/api_server/api_schemas.py index ada20230a..2100a6fe2 100644 --- a/freqtrade/rpc/api_server/api_schemas.py +++ b/freqtrade/rpc/api_server/api_schemas.py @@ -372,6 +372,10 @@ class StrategyListResponse(BaseModel): strategies: List[str] +class FreqAIModelListResponse(BaseModel): + freqaimodels: List[str] + + class StrategyResponse(BaseModel): strategy: str code: str @@ -419,6 +423,7 @@ class BacktestRequest(BaseModel): stake_amount: Optional[str] enable_protections: bool dry_run_wallet: Optional[float] + freqaimodel: Optional[str] class BacktestResponse(BaseModel): diff --git a/freqtrade/rpc/api_server/api_v1.py b/freqtrade/rpc/api_server/api_v1.py index 9e4b140e4..e26df6eea 100644 --- a/freqtrade/rpc/api_server/api_v1.py +++ b/freqtrade/rpc/api_server/api_v1.py @@ -13,12 +13,13 @@ from freqtrade.rpc import RPC from freqtrade.rpc.api_server.api_schemas import (AvailablePairs, Balances, BlacklistPayload, BlacklistResponse, Count, Daily, DeleteLockRequest, DeleteTrade, 
ForceEnterPayload, - ForceEnterResponse, ForceExitPayload, Health, - Locks, Logs, OpenTradeSchema, PairHistory, - PerformanceEntry, Ping, PlotConfig, Profit, - ResultMsg, ShowConfig, Stats, StatusMsg, - StrategyListResponse, StrategyResponse, SysInfo, - Version, WhitelistResponse) + ForceEnterResponse, ForceExitPayload, + FreqAIModelListResponse, Health, Locks, Logs, + OpenTradeSchema, PairHistory, PerformanceEntry, + Ping, PlotConfig, Profit, ResultMsg, ShowConfig, + Stats, StatusMsg, StrategyListResponse, + StrategyResponse, SysInfo, Version, + WhitelistResponse) from freqtrade.rpc.api_server.deps import get_config, get_exchange, get_rpc, get_rpc_optional from freqtrade.rpc.rpc import RPCException @@ -38,7 +39,8 @@ logger = logging.getLogger(__name__) # 2.17: Forceentry - leverage, partial force_exit # 2.20: Add websocket endpoints # 2.21: Add new_candle messagetype -API_VERSION = 2.21 +# 2.22: Add FreqAI to backtesting +API_VERSION = 2.22 # Public API, requires no auth. router_public = APIRouter() @@ -279,6 +281,16 @@ def get_strategy(strategy: str, config=Depends(get_config)): } +@router.get('/freqaimodels', response_model=FreqAIModelListResponse, tags=['freqai']) +def list_freqaimodels(config=Depends(get_config)): + from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver + strategies = FreqaiModelResolver.search_all_objects( + config, False) + strategies = sorted(strategies, key=lambda x: x['name']) + + return {'freqaimodels': [x['name'] for x in strategies]} + + @router.get('/available_pairs', response_model=AvailablePairs, tags=['candle data']) def list_available_pairs(timeframe: Optional[str] = None, stake_currency: Optional[str] = None, candletype: Optional[CandleType] = None, config=Depends(get_config)): From 256fac2a2b56b7c384fd330eb29828b6661f1702 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 20 Dec 2022 07:23:41 +0100 Subject: [PATCH 390/421] Add test for freqaimodels endpoint --- tests/rpc/test_rpc_apiserver.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index ee067f911..74e620def 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1488,6 +1488,33 @@ def test_api_strategy(botclient): assert_response(rc, 500) +def test_api_freqaimodels(botclient, tmpdir): + ftbot, client = botclient + ftbot.config['user_data_dir'] = Path(tmpdir) + + rc = client_get(client, f"{BASE_URI}/freqaimodels") + + assert_response(rc) + + assert rc.json() == {'freqaimodels': [ + 'CatboostClassifier', + 'CatboostClassifierMultiTarget', + 'CatboostRegressor', + 'CatboostRegressorMultiTarget', + 'LightGBMClassifier', + 'LightGBMClassifierMultiTarget', + 'LightGBMRegressor', + 'LightGBMRegressorMultiTarget', + 'ReinforcementLearner', + 'ReinforcementLearner_multiproc', + 'XGBoostClassifier', + 'XGBoostRFClassifier', + 'XGBoostRFRegressor', + 'XGBoostRegressor', + 'XGBoostRegressorMultiTarget' + ]} + + def test_list_available_pairs(botclient): ftbot, client = botclient From 6d9f1fafb7b3105c96d02858adc0d30ef41d62d8 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 20 Dec 2022 19:20:39 +0100 Subject: [PATCH 391/421] allow backtest_cache to be provided via backtest API --- freqtrade/rpc/api_server/api_schemas.py | 1 + 1 file changed, 1 insertion(+) diff --git a/freqtrade/rpc/api_server/api_schemas.py b/freqtrade/rpc/api_server/api_schemas.py index 2100a6fe2..6c423c959 100644 --- a/freqtrade/rpc/api_server/api_schemas.py +++ 
b/freqtrade/rpc/api_server/api_schemas.py @@ -423,6 +423,7 @@ class BacktestRequest(BaseModel): stake_amount: Optional[str] enable_protections: bool dry_run_wallet: Optional[float] + backtest_cache: Optional[str] freqaimodel: Optional[str] From 07606a9e2380cc3b86ddf7753c5b05f7abeb37a3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 20 Dec 2022 19:32:29 +0100 Subject: [PATCH 392/421] Simplify APi backtest config merging --- freqtrade/rpc/api_server/api_backtest.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/freqtrade/rpc/api_server/api_backtest.py b/freqtrade/rpc/api_server/api_backtest.py index b17636a7d..2d3da6d20 100644 --- a/freqtrade/rpc/api_server/api_backtest.py +++ b/freqtrade/rpc/api_server/api_backtest.py @@ -11,6 +11,7 @@ from freqtrade.configuration.config_validation import validate_config_consistenc from freqtrade.data.btanalysis import get_backtest_resultlist, load_and_merge_backtest_result from freqtrade.enums import BacktestState from freqtrade.exceptions import DependencyException +from freqtrade.misc import deep_merge_dicts from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest, BacktestResponse) from freqtrade.rpc.api_server.deps import get_config, is_webserver_mode @@ -38,9 +39,8 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac btconfig = deepcopy(config) settings = dict(bt_settings) # Pydantic models will contain all keys, but non-provided ones are None - for setting in settings.keys(): - if settings[setting] is not None: - btconfig[setting] = settings[setting] + + btconfig = deep_merge_dicts(settings, btconfig, allow_null_overrides=False) try: btconfig['stake_amount'] = float(btconfig['stake_amount']) except ValueError: From 70531224e61641587a22d1816530f79a824139e7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 20 Dec 2022 19:44:01 +0100 Subject: [PATCH 393/421] Allow setting identifier via UI --- freqtrade/rpc/api_server/api_backtest.py | 2 ++ freqtrade/rpc/api_server/api_schemas.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/freqtrade/rpc/api_server/api_backtest.py b/freqtrade/rpc/api_server/api_backtest.py index 2d3da6d20..4e43a63b1 100644 --- a/freqtrade/rpc/api_server/api_backtest.py +++ b/freqtrade/rpc/api_server/api_backtest.py @@ -38,6 +38,8 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac btconfig = deepcopy(config) settings = dict(bt_settings) + if 'freqai' in settings: + settings['freqai'] = dict(settings['freqai']) # Pydantic models will contain all keys, but non-provided ones are None btconfig = deep_merge_dicts(settings, btconfig, allow_null_overrides=False) diff --git a/freqtrade/rpc/api_server/api_schemas.py b/freqtrade/rpc/api_server/api_schemas.py index 6c423c959..17dff222d 100644 --- a/freqtrade/rpc/api_server/api_schemas.py +++ b/freqtrade/rpc/api_server/api_schemas.py @@ -414,6 +414,10 @@ class PairHistory(BaseModel): } +class BacktestFreqAIInputs(BaseModel): + identifier: str + + class BacktestRequest(BaseModel): strategy: str timeframe: Optional[str] @@ -425,6 +429,7 @@ class BacktestRequest(BaseModel): dry_run_wallet: Optional[float] backtest_cache: Optional[str] freqaimodel: Optional[str] + freqai: Optional[BacktestFreqAIInputs] class BacktestResponse(BaseModel): From 73792fd6ce81f004dfa0fbe3129f86a6e9e2c697 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 21 Dec 2022 06:28:55 +0100 Subject: [PATCH 394/421] Don't attempt to convert None to dict --- freqtrade/rpc/api_server/api_backtest.py | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/api_backtest.py b/freqtrade/rpc/api_server/api_backtest.py index 4e43a63b1..bc2a40d91 100644 --- a/freqtrade/rpc/api_server/api_backtest.py +++ b/freqtrade/rpc/api_server/api_backtest.py @@ -38,7 +38,7 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac btconfig = deepcopy(config) settings = dict(bt_settings) - if 'freqai' in settings: + if settings.get('freqai', None) is not None: settings['freqai'] = dict(settings['freqai']) # Pydantic models will contain all keys, but non-provided ones are None From 2a7369b56a7f6063835a65c3d7e00b0f12889da4 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 23 Dec 2022 07:38:33 +0100 Subject: [PATCH 395/421] fix macos CI --- .github/workflows/ci.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0a787bc47..77432cc9e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -147,7 +147,15 @@ jobs: - name: Installation - macOS if: runner.os == 'macOS' run: | - brew update + # homebrew fails to update python 3.9.1 to 3.9.1.1 due to unlinking failure + rm /usr/local/bin/2to3 || true + # homebrew fails to update python from 3.9 to 3.10 due to another unlinking failure + rm /usr/local/bin/idle3 || true + rm /usr/local/bin/pydoc3 || true + rm /usr/local/bin/python3 || true + rm /usr/local/bin/python3-config || true + # Ignore brew update failures - https://github.com/actions/runner-images/issues/6817 + brew update || true brew install hdf5 c-blosc python -m pip install --upgrade pip wheel export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH From ad0d7c9a9ee0e2bdcbff02d487752bb1ffdcb57d Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 23 Dec 2022 16:09:35 +0100 Subject: [PATCH 396/421] Don't allow DCA trades to go beyond max order size closes #7924 --- freqtrade/freqtradebot.py | 1 + freqtrade/optimize/backtesting.py | 1 + freqtrade/wallets.py | 7 ++++++- tests/test_wallets.py | 26 ++++++++++++++------------ 4 files changed, 22 insertions(+), 13 deletions(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index f6c4a52bb..258a45008 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -912,6 +912,7 @@ class FreqtradeBot(LoggingMixin): stake_amount=stake_amount, min_stake_amount=min_stake_amount, max_stake_amount=max_stake_amount, + trade_amount=trade.stake_amount if trade else None, ) return enter_limit_requested, stake_amount, leverage diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 82aa2b3e9..2b8b96cba 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -769,6 +769,7 @@ class Backtesting: stake_amount=stake_amount, min_stake_amount=min_stake_amount, max_stake_amount=max_stake_amount, + trade_amount=trade.stake_amount if trade else None ) return propose_rate, stake_amount_val, leverage, min_stake_amount diff --git a/freqtrade/wallets.py b/freqtrade/wallets.py index 0a9ecc638..97db3fba5 100644 --- a/freqtrade/wallets.py +++ b/freqtrade/wallets.py @@ -291,12 +291,17 @@ class Wallets: return self._check_available_stake_amount(stake_amount, available_amount) def validate_stake_amount(self, pair: str, stake_amount: Optional[float], - min_stake_amount: Optional[float], max_stake_amount: float): + min_stake_amount: Optional[float], max_stake_amount: float, + trade_amount: Optional[float]): if not stake_amount: 
logger.debug(f"Stake amount is {stake_amount}, ignoring possible trade for {pair}.") return 0 max_stake_amount = min(max_stake_amount, self.get_available_stake_amount()) + if trade_amount: + # if in a trade, then the resulting trade size cannot go beyond the max stake + # Otherwise we could no longer exit. + max_stake_amount = min(max_stake_amount, max_stake_amount - trade_amount) if min_stake_amount is not None and min_stake_amount > max_stake_amount: if self._log: diff --git a/tests/test_wallets.py b/tests/test_wallets.py index 73a34bbae..0117f7427 100644 --- a/tests/test_wallets.py +++ b/tests/test_wallets.py @@ -180,17 +180,17 @@ def test_get_trade_stake_amount_unlimited_amount(default_conf, ticker, balance_r assert result == 0 -@pytest.mark.parametrize('stake_amount,min_stake,stake_available,max_stake,expected', [ - (22, 11, 50, 10000, 22), - (100, 11, 500, 10000, 100), - (1000, 11, 500, 10000, 500), # Above stake_available - (700, 11, 1000, 400, 400), # Above max_stake, below stake available - (20, 15, 10, 10000, 0), # Minimum stake > stake_available - (9, 11, 100, 10000, 11), # Below min stake - (1, 15, 10, 10000, 0), # Below min stake and min_stake > stake_available - (20, 50, 100, 10000, 0), # Below min stake and stake * 1.3 > min_stake - (1000, None, 1000, 10000, 1000), # No min-stake-amount could be determined - +@pytest.mark.parametrize('stake_amount,min_stake,stake_available,max_stake,trade_amount,expected', [ + (22, 11, 50, 10000, None, 22), + (100, 11, 500, 10000, None, 100), + (1000, 11, 500, 10000, None, 500), # Above stake_available + (700, 11, 1000, 400, None, 400), # Above max_stake, below stake available + (20, 15, 10, 10000, None, 0), # Minimum stake > stake_available + (9, 11, 100, 10000, None, 11), # Below min stake + (1, 15, 10, 10000, None, 0), # Below min stake and min_stake > stake_available + (20, 50, 100, 10000, None, 0), # Below min stake and stake * 1.3 > min_stake + (1000, None, 1000, 10000, None, 1000), # No min-stake-amount could be determined + (2000, 15, 2000, 3000, 1500, 500), # Rebuy - resulting in too high stake amount. Adjusting. 
]) def test_validate_stake_amount( mocker, @@ -199,13 +199,15 @@ def test_validate_stake_amount( min_stake, stake_available, max_stake, + trade_amount, expected, ): freqtrade = get_patched_freqtradebot(mocker, default_conf) mocker.patch("freqtrade.wallets.Wallets.get_available_stake_amount", return_value=stake_available) - res = freqtrade.wallets.validate_stake_amount('XRP/USDT', stake_amount, min_stake, max_stake) + res = freqtrade.wallets.validate_stake_amount( + 'XRP/USDT', stake_amount, min_stake, max_stake, trade_amount) assert res == expected From 524da3c7ab1e4ab77d6eb0d0a87226f278e5b870 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 23 Dec 2022 16:19:12 +0100 Subject: [PATCH 397/421] Don't actually load models to avoid random failures --- tests/rpc/test_rpc_apiserver.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 74e620def..aea8ea059 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1488,19 +1488,30 @@ def test_api_strategy(botclient): assert_response(rc, 500) -def test_api_freqaimodels(botclient, tmpdir): +def test_api_freqaimodels(botclient, tmpdir, mocker): ftbot, client = botclient ftbot.config['user_data_dir'] = Path(tmpdir) + mocker.patch( + "freqtrade.resolvers.freqaimodel_resolver.FreqaiModelResolver.search_all_objects", + return_value=[ + {'name': 'LightGBMClassifier'}, + {'name': 'LightGBMClassifierMultiTarget'}, + {'name': 'LightGBMRegressor'}, + {'name': 'LightGBMRegressorMultiTarget'}, + {'name': 'ReinforcementLearner'}, + {'name': 'ReinforcementLearner_multiproc'}, + {'name': 'XGBoostClassifier'}, + {'name': 'XGBoostRFClassifier'}, + {'name': 'XGBoostRFRegressor'}, + {'name': 'XGBoostRegressor'}, + {'name': 'XGBoostRegressorMultiTarget'}, + ]) rc = client_get(client, f"{BASE_URI}/freqaimodels") assert_response(rc) assert rc.json() == {'freqaimodels': [ - 'CatboostClassifier', - 'CatboostClassifierMultiTarget', - 'CatboostRegressor', - 'CatboostRegressorMultiTarget', 'LightGBMClassifier', 'LightGBMClassifierMultiTarget', 'LightGBMRegressor', From ce13ce4b10ec8887b4fb21ccddd942aaca3eee1f Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 25 Dec 2022 09:05:53 +0100 Subject: [PATCH 398/421] Update binance stoploss order types closes #7927 an update to the most recent ccxt version (>2.4.55) would have the same effect. 
--- freqtrade/exchange/binance.py | 2 +- tests/exchange/test_binance.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/exchange/binance.py b/freqtrade/exchange/binance.py index b21e64eb2..7462e4f81 100644 --- a/freqtrade/exchange/binance.py +++ b/freqtrade/exchange/binance.py @@ -31,7 +31,7 @@ class Binance(Exchange): "ccxt_futures_name": "future" } _ft_has_futures: Dict = { - "stoploss_order_types": {"limit": "limit", "market": "market"}, + "stoploss_order_types": {"limit": "stop", "market": "stop_market"}, "tickers_have_price": False, } diff --git a/tests/exchange/test_binance.py b/tests/exchange/test_binance.py index 1fc8b4153..306a30985 100644 --- a/tests/exchange/test_binance.py +++ b/tests/exchange/test_binance.py @@ -23,7 +23,7 @@ from tests.exchange.test_exchange import ccxt_exceptionhandlers def test_stoploss_order_binance(default_conf, mocker, limitratio, expected, side, trademode): api_mock = MagicMock() order_id = 'test_prod_buy_{}'.format(randint(0, 10 ** 6)) - order_type = 'stop_loss_limit' if trademode == TradingMode.SPOT else 'limit' + order_type = 'stop_loss_limit' if trademode == TradingMode.SPOT else 'stop' api_mock.create_order = MagicMock(return_value={ 'id': order_id, From b1bf6d8dc96390a18dd81ecf63c6f1ee924d29e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 03:00:41 +0000 Subject: [PATCH 399/421] Bump nbconvert from 7.2.6 to 7.2.7 Bumps [nbconvert](https://github.com/jupyter/nbconvert) from 7.2.6 to 7.2.7. - [Release notes](https://github.com/jupyter/nbconvert/releases) - [Changelog](https://github.com/jupyter/nbconvert/blob/main/CHANGELOG.md) - [Commits](https://github.com/jupyter/nbconvert/compare/v7.2.6...v7.2.7) --- updated-dependencies: - dependency-name: nbconvert dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 336351019..3e6226f71 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -23,7 +23,7 @@ time-machine==2.8.2 httpx==0.23.1 # Convert jupyter notebooks to markdown documents -nbconvert==7.2.6 +nbconvert==7.2.7 # mypy types types-cachetools==5.2.1 From e0f60e175f396285d9a9534df09a8ba893dcfbd9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 03:01:00 +0000 Subject: [PATCH 400/421] Bump pre-commit from 2.20.0 to 2.21.0 Bumps [pre-commit](https://github.com/pre-commit/pre-commit) from 2.20.0 to 2.21.0. - [Release notes](https://github.com/pre-commit/pre-commit/releases) - [Changelog](https://github.com/pre-commit/pre-commit/blob/main/CHANGELOG.md) - [Commits](https://github.com/pre-commit/pre-commit/compare/v2.20.0...v2.21.0) --- updated-dependencies: - dependency-name: pre-commit dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 336351019..6ed13397a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -10,7 +10,7 @@ coveralls==3.3.1 flake8==6.0.0 flake8-tidy-imports==4.8.0 mypy==0.991 -pre-commit==2.20.0 +pre-commit==2.21.0 pytest==7.2.0 pytest-asyncio==0.20.3 pytest-cov==4.0.0 From 3993bd7c1c4b52cde41275585d36b4a434d66202 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 03:01:11 +0000 Subject: [PATCH 401/421] Bump types-requests from 2.28.11.5 to 2.28.11.7 Bumps [types-requests](https://github.com/python/typeshed) from 2.28.11.5 to 2.28.11.7. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 336351019..842b1ae76 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -28,6 +28,6 @@ nbconvert==7.2.6 # mypy types types-cachetools==5.2.1 types-filelock==3.2.7 -types-requests==2.28.11.5 +types-requests==2.28.11.7 types-tabulate==0.9.0.0 types-python-dateutil==2.8.19.5 From 9ea8792d3cba24487f2927e1b86808fca56510f7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 26 Dec 2022 08:45:02 +0100 Subject: [PATCH 402/421] Attempt brew fix --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 77432cc9e..ef85d1bdd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -149,6 +149,7 @@ jobs: run: | # homebrew fails to update python 3.9.1 to 3.9.1.1 due to unlinking failure rm /usr/local/bin/2to3 || true + rm /usr/local/bin/2to3-3.11 || true # homebrew fails to update python from 3.9 to 3.10 due to another unlinking failure rm /usr/local/bin/idle3 || true rm /usr/local/bin/pydoc3 || true From 18709406c5bac070950df2bd076e7f216a8d1158 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 26 Dec 2022 08:50:55 +0100 Subject: [PATCH 403/421] use link overwrite --- .github/workflows/ci.yml | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ef85d1bdd..3fa06951d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -147,16 +147,10 @@ jobs: - name: Installation - macOS if: runner.os == 'macOS' run: | - # homebrew fails to update python 3.9.1 to 3.9.1.1 due to unlinking failure - rm /usr/local/bin/2to3 || true - rm /usr/local/bin/2to3-3.11 || true - # homebrew fails to update python from 3.9 to 3.10 due to another unlinking failure - rm /usr/local/bin/idle3 || true - rm /usr/local/bin/pydoc3 || true - rm /usr/local/bin/python3 || true - rm /usr/local/bin/python3-config || true + brew update + # homebrew fails to update python due to unlinking failures # Ignore brew update failures - https://github.com/actions/runner-images/issues/6817 - brew update || true + brew link --overwrite python@3.10 python@3.11 brew install hdf5 c-blosc python -m pip install --upgrade pip wheel export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH From 9a556d2639e89cc34e21a4f392edcda6cf4d962b Mon Sep 17 00:00:00 2001 
From: Matthias Date: Mon, 26 Dec 2022 08:57:01 +0100 Subject: [PATCH 404/421] Remove all mac conflicts --- .github/workflows/ci.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3fa06951d..608565fdc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -149,8 +149,18 @@ jobs: run: | brew update # homebrew fails to update python due to unlinking failures - # Ignore brew update failures - https://github.com/actions/runner-images/issues/6817 - brew link --overwrite python@3.10 python@3.11 + # https://github.com/actions/runner-images/issues/6817 + rm /usr/local/bin/2to3 || true + rm /usr/local/bin/2to3-3.11 || true + rm /usr/local/bin/idle3 || true + rm /usr/local/bin/idle3.11 || true + rm /usr/local/bin/pydoc3 || true + rm /usr/local/bin/pydoc3.11 || true + rm /usr/local/bin/python3 || true + rm /usr/local/bin/python3.11 || true + rm /usr/local/bin/python3-config || true + rm /usr/local/bin/python3.11-config || true + brew install hdf5 c-blosc python -m pip install --upgrade pip wheel export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH From 00112d81d22bbac6057f20062c32617ef7d581a6 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 26 Dec 2022 09:21:18 +0100 Subject: [PATCH 405/421] Bump types-requests pre-commit --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a7e60ce90..306e4bbda 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: additional_dependencies: - types-cachetools==5.2.1 - types-filelock==3.2.7 - - types-requests==2.28.11.5 + - types-requests==2.28.11.7 - types-tabulate==0.9.0.0 - types-python-dateutil==2.8.19.5 # stages: [push] From c5b246af8001051903a42f8da5548d3ea128c8d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 08:24:40 +0000 Subject: [PATCH 406/421] Bump isort from 5.11.3 to 5.11.4 Bumps [isort](https://github.com/pycqa/isort) from 5.11.3 to 5.11.4. - [Release notes](https://github.com/pycqa/isort/releases) - [Changelog](https://github.com/PyCQA/isort/blob/main/CHANGELOG.md) - [Commits](https://github.com/pycqa/isort/compare/5.11.3...5.11.4) --- updated-dependencies: - dependency-name: isort dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 3e6226f71..a3aaeee1e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -16,7 +16,7 @@ pytest-asyncio==0.20.3 pytest-cov==4.0.0 pytest-mock==3.10.0 pytest-random-order==1.1.0 -isort==5.11.3 +isort==5.11.4 # For datetime mocking time-machine==2.8.2 # fastapi testing From aaeeb86622d7fe98a3c461600bc5d90c8757bdae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 12:40:26 +0000 Subject: [PATCH 407/421] Bump ccxt from 2.4.27 to 2.4.60 Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.4.27 to 2.4.60. - [Release notes](https://github.com/ccxt/ccxt/releases) - [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg) - [Commits](https://github.com/ccxt/ccxt/compare/2.4.27...2.4.60) --- updated-dependencies: - dependency-name: ccxt dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fa689de14..6e0edcb3b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.23.5 pandas==1.5.2 pandas-ta==0.3.14b -ccxt==2.4.27 +ccxt==2.4.60 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.4; platform_machine != 'armv7l' From 63f114395ad71ffc9499697057f7a04a1af90ef9 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 26 Dec 2022 14:02:47 +0100 Subject: [PATCH 408/421] is_short should be a boolean --- freqtrade/data/btanalysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py index 9bc543a9d..6350aca55 100644 --- a/freqtrade/data/btanalysis.py +++ b/freqtrade/data/btanalysis.py @@ -279,7 +279,7 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non ) # Compatibility support for pre short Columns if 'is_short' not in df.columns: - df['is_short'] = 0 + df['is_short'] = False if 'leverage' not in df.columns: df['leverage'] = 1.0 if 'enter_tag' not in df.columns: From 1cef40a1342cfd798992eee87e45989489b2079d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 14:31:47 +0000 Subject: [PATCH 409/421] Bump numpy from 1.23.5 to 1.24.1 Bumps [numpy](https://github.com/numpy/numpy) from 1.23.5 to 1.24.1. - [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/RELEASE_WALKTHROUGH.rst) - [Commits](https://github.com/numpy/numpy/compare/v1.23.5...v1.24.1) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6e0edcb3b..90bc4f702 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -numpy==1.23.5 +numpy==1.24.1 pandas==1.5.2 pandas-ta==0.3.14b From 6a15a9b41216a870f8655a53bd78978d81a568a5 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 26 Dec 2022 14:25:45 +0100 Subject: [PATCH 410/421] Update backtest-result_new fixing the calculation of profit_abs - which was incorrect previously. 
--- tests/data/test_btanalysis.py | 15 ++++++++------- tests/test_plotting.py | 2 +- .../backtest_results/backtest-result_new.json | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/data/test_btanalysis.py b/tests/data/test_btanalysis.py index ec7b457ea..95de6b53e 100644 --- a/tests/data/test_btanalysis.py +++ b/tests/data/test_btanalysis.py @@ -268,7 +268,7 @@ def test_create_cum_profit(testdatadir): "cum_profits", timeframe="5m") assert "cum_profits" in cum_profits.columns assert cum_profits.iloc[0]['cum_profits'] == 0 - assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 8.723007518796964e-06 + assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 9.0225563e-05 def test_create_cum_profit1(testdatadir): @@ -286,7 +286,7 @@ def test_create_cum_profit1(testdatadir): "cum_profits", timeframe="5m") assert "cum_profits" in cum_profits.columns assert cum_profits.iloc[0]['cum_profits'] == 0 - assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 8.723007518796964e-06 + assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 9.0225563e-05 with pytest.raises(ValueError, match='Trade dataframe empty.'): create_cum_profit(df.set_index('date'), bt_data[bt_data["pair"] == 'NOTAPAIR'], @@ -299,13 +299,13 @@ def test_calculate_max_drawdown(testdatadir): _, hdate, lowdate, hval, lval, drawdown = calculate_max_drawdown( bt_data, value_col="profit_abs") assert isinstance(drawdown, float) - assert pytest.approx(drawdown) == 0.12071099 + assert pytest.approx(drawdown) == 0.29753914 assert isinstance(hdate, Timestamp) assert isinstance(lowdate, Timestamp) assert isinstance(hval, float) assert isinstance(lval, float) - assert hdate == Timestamp('2018-01-25 01:30:00', tz='UTC') - assert lowdate == Timestamp('2018-01-25 03:50:00', tz='UTC') + assert hdate == Timestamp('2018-01-16 19:30:00', tz='UTC') + assert lowdate == Timestamp('2018-01-16 22:25:00', tz='UTC') underwater = calculate_underwater(bt_data) assert isinstance(underwater, DataFrame) @@ -324,8 +324,9 @@ def test_calculate_csum(testdatadir): assert isinstance(csum_min, float) assert isinstance(csum_max, float) - assert csum_min < 0.01 - assert csum_max > 0.02 + assert csum_min < csum_max + assert csum_min < 0.0001 + assert csum_max > 0.0002 csum_min1, csum_max1 = calculate_csum(bt_data, 5) assert csum_min1 == csum_min + 5 diff --git a/tests/test_plotting.py b/tests/test_plotting.py index f13bdee13..64089c4c6 100644 --- a/tests/test_plotting.py +++ b/tests/test_plotting.py @@ -354,7 +354,7 @@ def test_generate_profit_graph(testdatadir): profit = find_trace_in_fig_data(figure.data, "Profit") assert isinstance(profit, go.Scatter) - drawdown = find_trace_in_fig_data(figure.data, "Max drawdown 35.69%") + drawdown = find_trace_in_fig_data(figure.data, "Max drawdown 73.89%") assert isinstance(drawdown, go.Scatter) parallel = find_trace_in_fig_data(figure.data, "Parallel trades") assert isinstance(parallel, go.Scatter) diff --git a/tests/testdata/backtest_results/backtest-result_new.json b/tests/testdata/backtest_results/backtest-result_new.json index 03fdb455a..f16f95c33 100644 --- a/tests/testdata/backtest_results/backtest-result_new.json +++ b/tests/testdata/backtest_results/backtest-result_new.json @@ -1 +1 @@ -{"strategy":{"StrategyTestV3":{"trades":[{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.37344398340249,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 
07:20:00+00:00","open_rate":9.64e-05,"close_rate":0.00010074887218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.348872180451118e-06,"exit_reason":"roi","initial_stop_loss_abs":8.676e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.676e-05,"stop_loss_ratio":0.1,"min_rate":9.64e-05,"max_rate":0.00010074887218045112,"is_open":false,"buy_tag":null,"open_timestamp":1515568500000.0,"close_timestamp":1515568800000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":21.026072329688816,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 07:30:00+00:00","open_rate":4.756e-05,"close_rate":4.9705563909774425e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":2.1455639097744267e-06,"exit_reason":"roi","initial_stop_loss_abs":4.2804e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2804e-05,"stop_loss_ratio":0.1,"min_rate":4.756e-05,"max_rate":4.9705563909774425e-05,"is_open":false,"buy_tag":"buy_tag","open_timestamp":1515568500000.0,"close_timestamp":1515569400000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":29.94908655286014,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:35:00+00:00","open_rate":3.339e-05,"close_rate":3.489631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":1.506315789473681e-06,"exit_reason":"roi","initial_stop_loss_abs":3.0050999999999997e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0050999999999997e-05,"stop_loss_ratio":0.1,"min_rate":3.339e-05,"max_rate":3.489631578947368e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515569100000.0,"close_timestamp":1515569700000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.313531353135314,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:40:00+00:00","open_rate":9.696e-05,"close_rate":0.00010133413533834584,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":4.3741353383458455e-06,"exit_reason":"roi","initial_stop_loss_abs":8.7264e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.7264e-05,"stop_loss_ratio":0.1,"min_rate":9.696e-05,"max_rate":0.00010133413533834584,"is_open":false,"buy_tag":null,"open_timestamp":1515569100000.0,"close_timestamp":1515570000000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010604453870625663,"open_date":"2018-01-10 07:35:00+00:00","close_date":"2018-01-10 08:35:00+00:00","open_rate":0.0943,"close_rate":0.09477268170426063,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":0.0004726817042606385,"exit_reason":"roi","initial_stop_loss_abs":0.08487,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08487,"stop_loss_ratio":0.1,"min_rate":0.0943,"max_rate":0.09477268170426063,"is_open":false,"buy_tag":null,"open_timestamp":1515569700000.0,"close_timestamp":1515573300000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03677001860930642,"open_date":"2018-01-10 07:40:00+00:00","close_date":"2018-01-10 
08:10:00+00:00","open_rate":0.02719607,"close_rate":0.02760503345864661,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":0.00040896345864661204,"exit_reason":"roi","initial_stop_loss_abs":0.024476463,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024476463,"stop_loss_ratio":0.1,"min_rate":0.02719607,"max_rate":0.02760503345864661,"is_open":false,"buy_tag":null,"open_timestamp":1515570000000.0,"close_timestamp":1515571800000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021575196463739,"open_date":"2018-01-10 08:15:00+00:00","close_date":"2018-01-10 09:55:00+00:00","open_rate":0.04634952,"close_rate":0.046581848421052625,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":100,"profit_ratio":0.0,"profit_abs":0.0002323284210526272,"exit_reason":"roi","initial_stop_loss_abs":0.041714568,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041714568,"stop_loss_ratio":0.1,"min_rate":0.04634952,"max_rate":0.046581848421052625,"is_open":false,"buy_tag":null,"open_timestamp":1515572100000.0,"close_timestamp":1515578100000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":32.615786040443574,"open_date":"2018-01-10 14:45:00+00:00","close_date":"2018-01-10 15:50:00+00:00","open_rate":3.066e-05,"close_rate":3.081368421052631e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":1.5368421052630647e-07,"exit_reason":"roi","initial_stop_loss_abs":2.7594e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7594e-05,"stop_loss_ratio":0.1,"min_rate":3.066e-05,"max_rate":3.081368421052631e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515595500000.0,"close_timestamp":1515599400000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.05917194776300452,"open_date":"2018-01-10 16:35:00+00:00","close_date":"2018-01-10 17:15:00+00:00","open_rate":0.0168999,"close_rate":0.016984611278195488,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":8.471127819548868e-05,"exit_reason":"roi","initial_stop_loss_abs":0.01520991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01520991,"stop_loss_ratio":0.1,"min_rate":0.0168999,"max_rate":0.016984611278195488,"is_open":false,"buy_tag":null,"open_timestamp":1515602100000.0,"close_timestamp":1515604500000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010949822656672253,"open_date":"2018-01-10 16:40:00+00:00","close_date":"2018-01-10 17:20:00+00:00","open_rate":0.09132568,"close_rate":0.0917834528320802,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0004577728320801916,"exit_reason":"roi","initial_stop_loss_abs":0.08219311200000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08219311200000001,"stop_loss_ratio":0.1,"min_rate":0.09132568,"max_rate":0.0917834528320802,"is_open":false,"buy_tag":null,"open_timestamp":1515602400000.0,"close_timestamp":1515604800000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011238476768326556,"open_date":"2018-01-10 18:50:00+00:00","close_date":"2018-01-10 
19:45:00+00:00","open_rate":0.08898003,"close_rate":0.08942604518796991,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":0.00044601518796991146,"exit_reason":"roi","initial_stop_loss_abs":0.080082027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080082027,"stop_loss_ratio":0.1,"min_rate":0.08898003,"max_rate":0.08942604518796991,"is_open":false,"buy_tag":null,"open_timestamp":1515610200000.0,"close_timestamp":1515613500000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011682232072680307,"open_date":"2018-01-10 22:15:00+00:00","close_date":"2018-01-10 23:00:00+00:00","open_rate":0.08560008,"close_rate":0.08602915308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":0.00042907308270676014,"exit_reason":"roi","initial_stop_loss_abs":0.077040072,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077040072,"stop_loss_ratio":0.1,"min_rate":0.08560008,"max_rate":0.08602915308270676,"is_open":false,"buy_tag":null,"open_timestamp":1515622500000.0,"close_timestamp":1515625200000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4014726015023105,"open_date":"2018-01-10 22:50:00+00:00","close_date":"2018-01-10 23:20:00+00:00","open_rate":0.00249083,"close_rate":0.0025282860902255634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":3.745609022556351e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002241747,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002241747,"stop_loss_ratio":0.1,"min_rate":0.00249083,"max_rate":0.0025282860902255634,"is_open":false,"buy_tag":null,"open_timestamp":1515624600000.0,"close_timestamp":1515626400000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":33.090668431502316,"open_date":"2018-01-10 23:15:00+00:00","close_date":"2018-01-11 00:15:00+00:00","open_rate":3.022e-05,"close_rate":3.037147869674185e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":1.5147869674185174e-07,"exit_reason":"roi","initial_stop_loss_abs":2.7198e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7198e-05,"stop_loss_ratio":0.1,"min_rate":3.022e-05,"max_rate":3.037147869674185e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515626100000.0,"close_timestamp":1515629700000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.41034058268362744,"open_date":"2018-01-10 23:40:00+00:00","close_date":"2018-01-11 00:05:00+00:00","open_rate":0.002437,"close_rate":0.0024980776942355883,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":6.107769423558838e-05,"exit_reason":"roi","initial_stop_loss_abs":0.0021933,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0021933,"stop_loss_ratio":0.1,"min_rate":0.002437,"max_rate":0.0024980776942355883,"is_open":false,"buy_tag":null,"open_timestamp":1515627600000.0,"close_timestamp":1515629100000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02095643931654345,"open_date":"2018-01-11 00:00:00+00:00","close_date":"2018-01-11 
00:35:00+00:00","open_rate":0.04771803,"close_rate":0.04843559436090225,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":0.0007175643609022495,"exit_reason":"roi","initial_stop_loss_abs":0.042946227000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.042946227000000003,"stop_loss_ratio":0.1,"min_rate":0.04771803,"max_rate":0.04843559436090225,"is_open":false,"buy_tag":null,"open_timestamp":1515628800000.0,"close_timestamp":1515630900000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":27.389756231169542,"open_date":"2018-01-11 03:40:00+00:00","close_date":"2018-01-11 04:25:00+00:00","open_rate":3.651e-05,"close_rate":3.2859000000000005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.10448878,"profit_abs":-3.650999999999996e-06,"exit_reason":"stop_loss","initial_stop_loss_abs":3.2859000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2859000000000005e-05,"stop_loss_ratio":0.1,"min_rate":3.2859000000000005e-05,"max_rate":3.651e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515642000000.0,"close_timestamp":1515644700000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011332594070446804,"open_date":"2018-01-11 03:55:00+00:00","close_date":"2018-01-11 04:25:00+00:00","open_rate":0.08824105,"close_rate":0.08956798308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":0.0013269330827067605,"exit_reason":"roi","initial_stop_loss_abs":0.079416945,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079416945,"stop_loss_ratio":0.1,"min_rate":0.08824105,"max_rate":0.08956798308270676,"is_open":false,"buy_tag":null,"open_timestamp":1515642900000.0,"close_timestamp":1515644700000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.411522633744856,"open_date":"2018-01-11 04:00:00+00:00","close_date":"2018-01-11 04:50:00+00:00","open_rate":0.00243,"close_rate":0.002442180451127819,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":1.2180451127819219e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002187,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002187,"stop_loss_ratio":0.1,"min_rate":0.00243,"max_rate":0.002442180451127819,"is_open":false,"buy_tag":null,"open_timestamp":1515643200000.0,"close_timestamp":1515646200000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022001890402423376,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 04:55:00+00:00","open_rate":0.04545064,"close_rate":0.046589753784461146,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":0.001139113784461146,"exit_reason":"roi","initial_stop_loss_abs":0.040905576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040905576,"stop_loss_ratio":0.1,"min_rate":0.04545064,"max_rate":0.046589753784461146,"is_open":false,"buy_tag":null,"open_timestamp":1515645000000.0,"close_timestamp":1515646500000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":29.655990510083036,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 
04:50:00+00:00","open_rate":3.372e-05,"close_rate":3.456511278195488e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":8.4511278195488e-07,"exit_reason":"roi","initial_stop_loss_abs":3.0348e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0348e-05,"stop_loss_ratio":0.1,"min_rate":3.372e-05,"max_rate":3.456511278195488e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515645000000.0,"close_timestamp":1515646200000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.037821482602118005,"open_date":"2018-01-11 04:55:00+00:00","close_date":"2018-01-11 05:15:00+00:00","open_rate":0.02644,"close_rate":0.02710265664160401,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":0.0006626566416040071,"exit_reason":"roi","initial_stop_loss_abs":0.023796,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023796,"stop_loss_ratio":0.1,"min_rate":0.02644,"max_rate":0.02710265664160401,"is_open":false,"buy_tag":null,"open_timestamp":1515646500000.0,"close_timestamp":1515647700000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011348161597821153,"open_date":"2018-01-11 11:20:00+00:00","close_date":"2018-01-11 12:00:00+00:00","open_rate":0.08812,"close_rate":0.08856170426065162,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0004417042606516125,"exit_reason":"roi","initial_stop_loss_abs":0.079308,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079308,"stop_loss_ratio":0.1,"min_rate":0.08812,"max_rate":0.08856170426065162,"is_open":false,"buy_tag":null,"open_timestamp":1515669600000.0,"close_timestamp":1515672000000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.037263696923919086,"open_date":"2018-01-11 11:35:00+00:00","close_date":"2018-01-11 12:15:00+00:00","open_rate":0.02683577,"close_rate":0.026970285137844607,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00013451513784460897,"exit_reason":"roi","initial_stop_loss_abs":0.024152193,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024152193,"stop_loss_ratio":0.1,"min_rate":0.02683577,"max_rate":0.026970285137844607,"is_open":false,"buy_tag":null,"open_timestamp":1515670500000.0,"close_timestamp":1515672900000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.329335230737954,"open_date":"2018-01-11 14:00:00+00:00","close_date":"2018-01-11 14:25:00+00:00","open_rate":4.919e-05,"close_rate":5.04228320802005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":1.232832080200495e-06,"exit_reason":"roi","initial_stop_loss_abs":4.4271000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4271000000000004e-05,"stop_loss_ratio":0.1,"min_rate":4.919e-05,"max_rate":5.04228320802005e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515679200000.0,"close_timestamp":1515680700000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.01138317402960718,"open_date":"2018-01-11 19:25:00+00:00","close_date":"2018-01-11 
20:35:00+00:00","open_rate":0.08784896,"close_rate":0.08828930566416039,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":70,"profit_ratio":-0.0,"profit_abs":0.0004403456641603881,"exit_reason":"roi","initial_stop_loss_abs":0.079064064,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079064064,"stop_loss_ratio":0.1,"min_rate":0.08784896,"max_rate":0.08828930566416039,"is_open":false,"buy_tag":null,"open_timestamp":1515698700000.0,"close_timestamp":1515702900000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.58863858961802,"open_date":"2018-01-11 22:35:00+00:00","close_date":"2018-01-11 23:30:00+00:00","open_rate":5.105e-05,"close_rate":5.130588972431077e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":2.558897243107704e-07,"exit_reason":"roi","initial_stop_loss_abs":4.5945e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.5945e-05,"stop_loss_ratio":0.1,"min_rate":5.105e-05,"max_rate":5.130588972431077e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515710100000.0,"close_timestamp":1515713400000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.252525252525253,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:25:00+00:00","open_rate":3.96e-05,"close_rate":4.019548872180451e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":5.954887218045116e-07,"exit_reason":"roi","initial_stop_loss_abs":3.5640000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5640000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.96e-05,"max_rate":4.019548872180451e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515711300000.0,"close_timestamp":1515713100000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":34.66204506065858,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:35:00+00:00","open_rate":2.885e-05,"close_rate":2.899461152882205e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":1.4461152882205115e-07,"exit_reason":"roi","initial_stop_loss_abs":2.5965e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.5965e-05,"stop_loss_ratio":0.1,"min_rate":2.885e-05,"max_rate":2.899461152882205e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515711300000.0,"close_timestamp":1515713700000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03780718336483932,"open_date":"2018-01-11 23:30:00+00:00","close_date":"2018-01-12 00:05:00+00:00","open_rate":0.02645,"close_rate":0.026847744360902256,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":0.0003977443609022545,"exit_reason":"roi","initial_stop_loss_abs":0.023805000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023805000000000003,"stop_loss_ratio":0.1,"min_rate":0.02645,"max_rate":0.026847744360902256,"is_open":false,"buy_tag":null,"open_timestamp":1515713400000.0,"close_timestamp":1515715500000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.020833333333333332,"open_date":"2018-01-11 23:55:00+00:00","close_date":"2018-01-12 
01:15:00+00:00","open_rate":0.048,"close_rate":0.04824060150375939,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":0.00024060150375938838,"exit_reason":"roi","initial_stop_loss_abs":0.0432,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0432,"stop_loss_ratio":0.1,"min_rate":0.048,"max_rate":0.04824060150375939,"is_open":false,"buy_tag":null,"open_timestamp":1515714900000.0,"close_timestamp":1515719700000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":21.31287297527707,"open_date":"2018-01-12 21:15:00+00:00","close_date":"2018-01-12 21:40:00+00:00","open_rate":4.692e-05,"close_rate":4.809593984962405e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":1.1759398496240516e-06,"exit_reason":"roi","initial_stop_loss_abs":4.2227999999999996e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2227999999999996e-05,"stop_loss_ratio":0.1,"min_rate":4.692e-05,"max_rate":4.809593984962405e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515791700000.0,"close_timestamp":1515793200000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38915654211062944,"open_date":"2018-01-13 00:55:00+00:00","close_date":"2018-01-13 06:20:00+00:00","open_rate":0.00256966,"close_rate":0.0025825405012531327,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":325,"profit_ratio":-0.0,"profit_abs":1.2880501253132587e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002312694,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002312694,"stop_loss_ratio":0.1,"min_rate":0.00256966,"max_rate":0.0025825405012531327,"is_open":false,"buy_tag":null,"open_timestamp":1515804900000.0,"close_timestamp":1515824400000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":15.96933886937081,"open_date":"2018-01-13 10:55:00+00:00","close_date":"2018-01-13 11:35:00+00:00","open_rate":6.262e-05,"close_rate":6.293388471177944e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":3.138847117794446e-07,"exit_reason":"roi","initial_stop_loss_abs":5.6358e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.6358e-05,"stop_loss_ratio":0.1,"min_rate":6.262e-05,"max_rate":6.293388471177944e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515840900000.0,"close_timestamp":1515843300000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":21.141649048625794,"open_date":"2018-01-13 13:05:00+00:00","close_date":"2018-01-15 14:10:00+00:00","open_rate":4.73e-05,"close_rate":4.753709273182957e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":2945,"profit_ratio":0.0,"profit_abs":2.3709273182957117e-07,"exit_reason":"roi","initial_stop_loss_abs":4.257e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.257e-05,"stop_loss_ratio":0.1,"min_rate":4.73e-05,"max_rate":4.753709273182957e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515848700000.0,"close_timestamp":1516025400000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.49348507339601,"open_date":"2018-01-13 13:30:00+00:00","close_date":"2018-01-13 
14:45:00+00:00","open_rate":6.063e-05,"close_rate":6.0933909774436085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":3.039097744360846e-07,"exit_reason":"roi","initial_stop_loss_abs":5.4567e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4567e-05,"stop_loss_ratio":0.1,"min_rate":6.063e-05,"max_rate":6.0933909774436085e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515850200000.0,"close_timestamp":1515854700000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":9.023641941887746,"open_date":"2018-01-13 13:40:00+00:00","close_date":"2018-01-13 23:30:00+00:00","open_rate":0.00011082,"close_rate":0.00011137548872180448,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":590,"profit_ratio":-0.0,"profit_abs":5.554887218044781e-07,"exit_reason":"roi","initial_stop_loss_abs":9.9738e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.9738e-05,"stop_loss_ratio":0.1,"min_rate":0.00011082,"max_rate":0.00011137548872180448,"is_open":false,"buy_tag":null,"open_timestamp":1515850800000.0,"close_timestamp":1515886200000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.863406408094438,"open_date":"2018-01-13 15:15:00+00:00","close_date":"2018-01-13 15:55:00+00:00","open_rate":5.93e-05,"close_rate":5.9597243107769415e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":2.9724310776941686e-07,"exit_reason":"roi","initial_stop_loss_abs":5.337e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.337e-05,"stop_loss_ratio":0.1,"min_rate":5.93e-05,"max_rate":5.9597243107769415e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515856500000.0,"close_timestamp":1515858900000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.020618543947292404,"open_date":"2018-01-13 16:30:00+00:00","close_date":"2018-01-13 17:10:00+00:00","open_rate":0.04850003,"close_rate":0.04874313791979949,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00024310791979949287,"exit_reason":"roi","initial_stop_loss_abs":0.043650027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.043650027,"stop_loss_ratio":0.1,"min_rate":0.04850003,"max_rate":0.04874313791979949,"is_open":false,"buy_tag":null,"open_timestamp":1515861000000.0,"close_timestamp":1515863400000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010178097365511457,"open_date":"2018-01-13 22:05:00+00:00","close_date":"2018-01-14 06:25:00+00:00","open_rate":0.09825019,"close_rate":0.09874267215538848,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":500,"profit_ratio":-0.0,"profit_abs":0.0004924821553884823,"exit_reason":"roi","initial_stop_loss_abs":0.088425171,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.088425171,"stop_loss_ratio":0.1,"min_rate":0.09825019,"max_rate":0.09874267215538848,"is_open":false,"buy_tag":null,"open_timestamp":1515881100000.0,"close_timestamp":1515911100000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.616816218012627,"open_date":"2018-01-14 00:20:00+00:00","close_date":"2018-01-14 
22:55:00+00:00","open_rate":6.018e-05,"close_rate":6.048165413533834e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":1355,"profit_ratio":0.0,"profit_abs":3.0165413533833987e-07,"exit_reason":"roi","initial_stop_loss_abs":5.4162e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4162e-05,"stop_loss_ratio":0.1,"min_rate":6.018e-05,"max_rate":6.048165413533834e-05,"is_open":false,"buy_tag":null,"open_timestamp":1515889200000.0,"close_timestamp":1515970500000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010246952581919518,"open_date":"2018-01-14 12:45:00+00:00","close_date":"2018-01-14 13:25:00+00:00","open_rate":0.09758999,"close_rate":0.0980791628822055,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0004891728822054991,"exit_reason":"roi","initial_stop_loss_abs":0.087830991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.087830991,"stop_loss_ratio":0.1,"min_rate":0.09758999,"max_rate":0.0980791628822055,"is_open":false,"buy_tag":null,"open_timestamp":1515933900000.0,"close_timestamp":1515936300000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3215434083601286,"open_date":"2018-01-14 15:30:00+00:00","close_date":"2018-01-14 16:00:00+00:00","open_rate":0.00311,"close_rate":0.0031567669172932328,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":4.676691729323286e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002799,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002799,"stop_loss_ratio":0.1,"min_rate":0.00311,"max_rate":0.0031567669172932328,"is_open":false,"buy_tag":null,"open_timestamp":1515943800000.0,"close_timestamp":1515945600000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.32010140812609433,"open_date":"2018-01-14 20:45:00+00:00","close_date":"2018-01-14 22:15:00+00:00","open_rate":0.00312401,"close_rate":0.003139669197994987,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":-0.0,"profit_abs":1.5659197994987058e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002811609,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002811609,"stop_loss_ratio":0.1,"min_rate":0.00312401,"max_rate":0.003139669197994987,"is_open":false,"buy_tag":null,"open_timestamp":1515962700000.0,"close_timestamp":1515968100000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.057247866085791646,"open_date":"2018-01-14 23:35:00+00:00","close_date":"2018-01-15 00:30:00+00:00","open_rate":0.0174679,"close_rate":0.017555458395989976,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":8.755839598997492e-05,"exit_reason":"roi","initial_stop_loss_abs":0.015721110000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.015721110000000003,"stop_loss_ratio":0.1,"min_rate":0.0174679,"max_rate":0.017555458395989976,"is_open":false,"buy_tag":null,"open_timestamp":1515972900000.0,"close_timestamp":1515976200000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.013611282991367997,"open_date":"2018-01-14 23:45:00+00:00","close_date":"2018-01-15 
00:25:00+00:00","open_rate":0.07346846,"close_rate":0.07383672295739348,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00036826295739347814,"exit_reason":"roi","initial_stop_loss_abs":0.066121614,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.066121614,"stop_loss_ratio":0.1,"min_rate":0.07346846,"max_rate":0.07383672295739348,"is_open":false,"buy_tag":null,"open_timestamp":1515973500000.0,"close_timestamp":1515975900000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010204706410596568,"open_date":"2018-01-15 02:25:00+00:00","close_date":"2018-01-15 03:05:00+00:00","open_rate":0.097994,"close_rate":0.09848519799498744,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0004911979949874384,"exit_reason":"roi","initial_stop_loss_abs":0.0881946,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0881946,"stop_loss_ratio":0.1,"min_rate":0.097994,"max_rate":0.09848519799498744,"is_open":false,"buy_tag":null,"open_timestamp":1515983100000.0,"close_timestamp":1515985500000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010353038616834042,"open_date":"2018-01-15 07:20:00+00:00","close_date":"2018-01-15 08:00:00+00:00","open_rate":0.09659,"close_rate":0.09707416040100247,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0004841604010024786,"exit_reason":"roi","initial_stop_loss_abs":0.086931,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.086931,"stop_loss_ratio":0.1,"min_rate":0.09659,"max_rate":0.09707416040100247,"is_open":false,"buy_tag":null,"open_timestamp":1516000800000.0,"close_timestamp":1516003200000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.013016921998599,"open_date":"2018-01-15 08:20:00+00:00","close_date":"2018-01-15 08:55:00+00:00","open_rate":9.987e-05,"close_rate":0.00010137180451127818,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.501804511278178e-06,"exit_reason":"roi","initial_stop_loss_abs":8.9883e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.9883e-05,"stop_loss_ratio":0.1,"min_rate":9.987e-05,"max_rate":0.00010137180451127818,"is_open":false,"buy_tag":null,"open_timestamp":1516004400000.0,"close_timestamp":1516006500000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010537752023511832,"open_date":"2018-01-15 12:10:00+00:00","close_date":"2018-01-16 02:50:00+00:00","open_rate":0.0948969,"close_rate":0.09537257368421052,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":880,"profit_ratio":0.0,"profit_abs":0.0004756736842105175,"exit_reason":"roi","initial_stop_loss_abs":0.08540721000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08540721000000001,"stop_loss_ratio":0.1,"min_rate":0.0948969,"max_rate":0.09537257368421052,"is_open":false,"buy_tag":null,"open_timestamp":1516018200000.0,"close_timestamp":1516071000000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014084507042253523,"open_date":"2018-01-15 14:10:00+00:00","close_date":"2018-01-15 
17:40:00+00:00","open_rate":0.071,"close_rate":0.07135588972431077,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":210,"profit_ratio":-0.0,"profit_abs":0.00035588972431077615,"exit_reason":"roi","initial_stop_loss_abs":0.0639,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0639,"stop_loss_ratio":0.1,"min_rate":0.071,"max_rate":0.07135588972431077,"is_open":false,"buy_tag":null,"open_timestamp":1516025400000.0,"close_timestamp":1516038000000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021736763017766978,"open_date":"2018-01-15 14:30:00+00:00","close_date":"2018-01-15 15:10:00+00:00","open_rate":0.04600501,"close_rate":0.046235611553884705,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00023060155388470588,"exit_reason":"roi","initial_stop_loss_abs":0.041404509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041404509,"stop_loss_ratio":0.1,"min_rate":0.04600501,"max_rate":0.046235611553884705,"is_open":false,"buy_tag":null,"open_timestamp":1516026600000.0,"close_timestamp":1516029000000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.595465140919686,"open_date":"2018-01-15 18:10:00+00:00","close_date":"2018-01-15 19:25:00+00:00","open_rate":9.438e-05,"close_rate":9.485308270676693e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":4.7308270676692514e-07,"exit_reason":"roi","initial_stop_loss_abs":8.4942e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.4942e-05,"stop_loss_ratio":0.1,"min_rate":9.438e-05,"max_rate":9.485308270676693e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516039800000.0,"close_timestamp":1516044300000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.032894726021471705,"open_date":"2018-01-15 18:35:00+00:00","close_date":"2018-01-15 19:15:00+00:00","open_rate":0.03040001,"close_rate":0.030552391002506264,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0001523810025062626,"exit_reason":"roi","initial_stop_loss_abs":0.027360009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027360009,"stop_loss_ratio":0.1,"min_rate":0.03040001,"max_rate":0.030552391002506264,"is_open":false,"buy_tag":null,"open_timestamp":1516041300000.0,"close_timestamp":1516043700000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.13208840157615,"open_date":"2018-01-15 20:25:00+00:00","close_date":"2018-01-16 08:25:00+00:00","open_rate":5.837e-05,"close_rate":5.2533e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":720,"profit_ratio":-0.10448878,"profit_abs":-5.8369999999999985e-06,"exit_reason":"stop_loss","initial_stop_loss_abs":5.2533e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2533e-05,"stop_loss_ratio":0.1,"min_rate":5.2533e-05,"max_rate":5.837e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516047900000.0,"close_timestamp":1516091100000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021722130506560085,"open_date":"2018-01-15 20:40:00+00:00","close_date":"2018-01-15 
22:00:00+00:00","open_rate":0.046036,"close_rate":0.04626675689223057,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":0.00023075689223057277,"exit_reason":"roi","initial_stop_loss_abs":0.0414324,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0414324,"stop_loss_ratio":0.1,"min_rate":0.046036,"max_rate":0.04626675689223057,"is_open":false,"buy_tag":null,"open_timestamp":1516048800000.0,"close_timestamp":1516053600000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.34861425832316545,"open_date":"2018-01-16 00:30:00+00:00","close_date":"2018-01-16 01:10:00+00:00","open_rate":0.0028685,"close_rate":0.0028828784461152877,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":1.4378446115287727e-05,"exit_reason":"roi","initial_stop_loss_abs":0.00258165,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00258165,"stop_loss_ratio":0.1,"min_rate":0.0028685,"max_rate":0.0028828784461152877,"is_open":false,"buy_tag":null,"open_timestamp":1516062600000.0,"close_timestamp":1516065000000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014854967241083492,"open_date":"2018-01-16 01:15:00+00:00","close_date":"2018-01-16 02:35:00+00:00","open_rate":0.06731755,"close_rate":0.0676549813283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":0.0,"profit_abs":0.00033743132832080025,"exit_reason":"roi","initial_stop_loss_abs":0.060585795000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060585795000000005,"stop_loss_ratio":0.1,"min_rate":0.06731755,"max_rate":0.0676549813283208,"is_open":false,"buy_tag":null,"open_timestamp":1516065300000.0,"close_timestamp":1516070100000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010848794492804754,"open_date":"2018-01-16 07:45:00+00:00","close_date":"2018-01-16 08:40:00+00:00","open_rate":0.09217614,"close_rate":0.09263817578947368,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":0.0004620357894736804,"exit_reason":"roi","initial_stop_loss_abs":0.082958526,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082958526,"stop_loss_ratio":0.1,"min_rate":0.09217614,"max_rate":0.09263817578947368,"is_open":false,"buy_tag":null,"open_timestamp":1516088700000.0,"close_timestamp":1516092000000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06060606060606061,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 08:55:00+00:00","open_rate":0.0165,"close_rate":0.016913533834586467,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":0.00041353383458646656,"exit_reason":"roi","initial_stop_loss_abs":0.01485,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01485,"stop_loss_ratio":0.1,"min_rate":0.0165,"max_rate":0.016913533834586467,"is_open":false,"buy_tag":null,"open_timestamp":1516091700000.0,"close_timestamp":1516092900000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":12.57387149503332,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 
08:40:00+00:00","open_rate":7.953e-05,"close_rate":8.311781954887218e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":3.587819548872171e-06,"exit_reason":"roi","initial_stop_loss_abs":7.157700000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.157700000000001e-05,"stop_loss_ratio":0.1,"min_rate":7.953e-05,"max_rate":8.311781954887218e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516091700000.0,"close_timestamp":1516092000000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022122914915269236,"open_date":"2018-01-16 08:45:00+00:00","close_date":"2018-01-16 09:50:00+00:00","open_rate":0.045202,"close_rate":0.04542857644110275,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":0.00022657644110275071,"exit_reason":"roi","initial_stop_loss_abs":0.0406818,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0406818,"stop_loss_ratio":0.1,"min_rate":0.045202,"max_rate":0.04542857644110275,"is_open":false,"buy_tag":null,"open_timestamp":1516092300000.0,"close_timestamp":1516096200000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.054878048780488,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:45:00+00:00","open_rate":5.248e-05,"close_rate":5.326917293233082e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":7.891729323308177e-07,"exit_reason":"roi","initial_stop_loss_abs":4.7232e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7232e-05,"stop_loss_ratio":0.1,"min_rate":5.248e-05,"max_rate":5.326917293233082e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516094100000.0,"close_timestamp":1516095900000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03457434486802627,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:55:00+00:00","open_rate":0.02892318,"close_rate":0.02906815834586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":0.0001449783458646603,"exit_reason":"roi","initial_stop_loss_abs":0.026030862000000002,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.026030862000000002,"stop_loss_ratio":0.1,"min_rate":0.02892318,"max_rate":0.02906815834586466,"is_open":false,"buy_tag":null,"open_timestamp":1516094100000.0,"close_timestamp":1516096500000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.38735944164405,"open_date":"2018-01-16 09:50:00+00:00","close_date":"2018-01-16 10:10:00+00:00","open_rate":5.158e-05,"close_rate":5.287273182957392e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":1.2927318295739246e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6422e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6422e-05,"stop_loss_ratio":0.1,"min_rate":5.158e-05,"max_rate":5.287273182957392e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516096200000.0,"close_timestamp":1516097400000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.035357778286929785,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 
10:35:00+00:00","open_rate":0.02828232,"close_rate":0.02870761804511278,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":0.00042529804511277913,"exit_reason":"roi","initial_stop_loss_abs":0.025454088,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025454088,"stop_loss_ratio":0.1,"min_rate":0.02828232,"max_rate":0.02870761804511278,"is_open":false,"buy_tag":null,"open_timestamp":1516097100000.0,"close_timestamp":1516098900000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022948496230938982,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 10:40:00+00:00","open_rate":0.04357584,"close_rate":0.044231115789473675,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":0.0006552757894736777,"exit_reason":"roi","initial_stop_loss_abs":0.039218256,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039218256,"stop_loss_ratio":0.1,"min_rate":0.04357584,"max_rate":0.044231115789473675,"is_open":false,"buy_tag":null,"open_timestamp":1516097100000.0,"close_timestamp":1516099200000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.64975755315181,"open_date":"2018-01-16 13:45:00+00:00","close_date":"2018-01-16 14:20:00+00:00","open_rate":5.362e-05,"close_rate":5.442631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":8.063157894736843e-07,"exit_reason":"roi","initial_stop_loss_abs":4.8258e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8258e-05,"stop_loss_ratio":0.1,"min_rate":5.362e-05,"max_rate":5.442631578947368e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516110300000.0,"close_timestamp":1516112400000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.86080724254998,"open_date":"2018-01-16 17:30:00+00:00","close_date":"2018-01-16 18:25:00+00:00","open_rate":5.302e-05,"close_rate":5.328576441102756e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":2.6576441102756397e-07,"exit_reason":"roi","initial_stop_loss_abs":4.7718e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7718e-05,"stop_loss_ratio":0.1,"min_rate":5.302e-05,"max_rate":5.328576441102756e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516123800000.0,"close_timestamp":1516127100000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010952903718828448,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 18:45:00+00:00","open_rate":0.09129999,"close_rate":0.09267292218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":0.0013729321804511196,"exit_reason":"roi","initial_stop_loss_abs":0.082169991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082169991,"stop_loss_ratio":0.1,"min_rate":0.09129999,"max_rate":0.09267292218045112,"is_open":false,"buy_tag":null,"open_timestamp":1516126500000.0,"close_timestamp":1516128300000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":26.26050420168067,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 
18:35:00+00:00","open_rate":3.808e-05,"close_rate":3.903438596491228e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":9.543859649122774e-07,"exit_reason":"roi","initial_stop_loss_abs":3.4272e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.4272e-05,"stop_loss_ratio":0.1,"min_rate":3.808e-05,"max_rate":3.903438596491228e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516126500000.0,"close_timestamp":1516127700000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.035574376772493324,"open_date":"2018-01-16 19:00:00+00:00","close_date":"2018-01-16 19:30:00+00:00","open_rate":0.02811012,"close_rate":0.028532828571428567,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":0.00042270857142856846,"exit_reason":"roi","initial_stop_loss_abs":0.025299108,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025299108,"stop_loss_ratio":0.1,"min_rate":0.02811012,"max_rate":0.028532828571428567,"is_open":false,"buy_tag":null,"open_timestamp":1516129200000.0,"close_timestamp":1516131000000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.387028357567759,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":0.00258379,"close_rate":0.002325411,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.10448878,"profit_abs":-0.000258379,"exit_reason":"stop_loss","initial_stop_loss_abs":0.002325411,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002325411,"stop_loss_ratio":0.1,"min_rate":0.002325411,"max_rate":0.00258379,"is_open":false,"buy_tag":null,"open_timestamp":1516137900000.0,"close_timestamp":1516141500000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":39.07776475185619,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":2.559e-05,"close_rate":2.3031e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.10448878,"profit_abs":-2.5590000000000004e-06,"exit_reason":"stop_loss","initial_stop_loss_abs":2.3031e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.3031e-05,"stop_loss_ratio":0.1,"min_rate":2.3031e-05,"max_rate":2.559e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516137900000.0,"close_timestamp":1516142700000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":13.123359580052494,"open_date":"2018-01-16 21:35:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":7.62e-05,"close_rate":6.858e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.10448878,"profit_abs":-7.619999999999998e-06,"exit_reason":"stop_loss","initial_stop_loss_abs":6.858e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.858e-05,"stop_loss_ratio":0.1,"min_rate":6.858e-05,"max_rate":7.62e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516138500000.0,"close_timestamp":1516141500000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4350777048780912,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 
22:35:00+00:00","open_rate":0.00229844,"close_rate":0.002402129022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":0.00010368902255639091,"exit_reason":"roi","initial_stop_loss_abs":0.0020685960000000002,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0020685960000000002,"stop_loss_ratio":0.1,"min_rate":0.00229844,"max_rate":0.002402129022556391,"is_open":false,"buy_tag":null,"open_timestamp":1516141800000.0,"close_timestamp":1516142100000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06622516556291391,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 22:40:00+00:00","open_rate":0.0151,"close_rate":0.015781203007518795,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":0.0006812030075187946,"exit_reason":"roi","initial_stop_loss_abs":0.013590000000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.013590000000000001,"stop_loss_ratio":0.1,"min_rate":0.0151,"max_rate":0.015781203007518795,"is_open":false,"buy_tag":null,"open_timestamp":1516141800000.0,"close_timestamp":1516142400000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.42431134269081283,"open_date":"2018-01-16 22:40:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":0.00235676,"close_rate":0.00246308,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":0.00010632000000000003,"exit_reason":"roi","initial_stop_loss_abs":0.002121084,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002121084,"stop_loss_ratio":0.1,"min_rate":0.00235676,"max_rate":0.00246308,"is_open":false,"buy_tag":null,"open_timestamp":1516142400000.0,"close_timestamp":1516142700000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01585559988076589,"open_date":"2018-01-16 22:45:00+00:00","close_date":"2018-01-16 23:05:00+00:00","open_rate":0.0630692,"close_rate":0.06464988170426066,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":0.0015806817042606502,"exit_reason":"roi","initial_stop_loss_abs":0.056762280000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.056762280000000005,"stop_loss_ratio":0.1,"min_rate":0.0630692,"max_rate":0.06464988170426066,"is_open":false,"buy_tag":null,"open_timestamp":1516142700000.0,"close_timestamp":1516143900000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":45.45454545454545,"open_date":"2018-01-16 22:50:00+00:00","close_date":"2018-01-16 22:55:00+00:00","open_rate":2.2e-05,"close_rate":2.299248120300751e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":9.924812030075114e-07,"exit_reason":"roi","initial_stop_loss_abs":1.98e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":1.98e-05,"stop_loss_ratio":0.1,"min_rate":2.2e-05,"max_rate":2.299248120300751e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516143000000.0,"close_timestamp":1516143300000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.10454362685967,"open_date":"2018-01-17 03:30:00+00:00","close_date":"2018-01-17 
04:00:00+00:00","open_rate":4.974e-05,"close_rate":5.048796992481203e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":7.479699248120277e-07,"exit_reason":"roi","initial_stop_loss_abs":4.4766000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4766000000000005e-05,"stop_loss_ratio":0.1,"min_rate":4.974e-05,"max_rate":5.048796992481203e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516159800000.0,"close_timestamp":1516161600000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":14.068655036578503,"open_date":"2018-01-17 03:55:00+00:00","close_date":"2018-01-17 04:15:00+00:00","open_rate":7.108e-05,"close_rate":7.28614536340852e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":1.7814536340851996e-06,"exit_reason":"roi","initial_stop_loss_abs":6.3972e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.3972e-05,"stop_loss_ratio":0.1,"min_rate":7.108e-05,"max_rate":7.28614536340852e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516161300000.0,"close_timestamp":1516162500000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.0231107002542177,"open_date":"2018-01-17 09:35:00+00:00","close_date":"2018-01-17 10:15:00+00:00","open_rate":0.04327,"close_rate":0.04348689223057644,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0002168922305764362,"exit_reason":"roi","initial_stop_loss_abs":0.038943000000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.038943000000000005,"stop_loss_ratio":0.1,"min_rate":0.04327,"max_rate":0.04348689223057644,"is_open":false,"buy_tag":null,"open_timestamp":1516181700000.0,"close_timestamp":1516184100000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.012007204322593,"open_date":"2018-01-17 10:20:00+00:00","close_date":"2018-01-17 17:00:00+00:00","open_rate":4.997e-05,"close_rate":5.022047619047618e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":400,"profit_ratio":-0.0,"profit_abs":2.504761904761831e-07,"exit_reason":"roi","initial_stop_loss_abs":4.4973e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4973e-05,"stop_loss_ratio":0.1,"min_rate":4.997e-05,"max_rate":5.022047619047618e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516184400000.0,"close_timestamp":1516208400000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014626687444363738,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 11:25:00+00:00","open_rate":0.06836818,"close_rate":0.06871087764411027,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":0.00034269764411026804,"exit_reason":"roi","initial_stop_loss_abs":0.061531362,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061531362,"stop_loss_ratio":0.1,"min_rate":0.06836818,"max_rate":0.06871087764411027,"is_open":false,"buy_tag":null,"open_timestamp":1516185000000.0,"close_timestamp":1516188300000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":27.548209366391184,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 
11:10:00+00:00","open_rate":3.63e-05,"close_rate":3.648195488721804e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":1.8195488721804031e-07,"exit_reason":"roi","initial_stop_loss_abs":3.2670000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2670000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.63e-05,"max_rate":3.648195488721804e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516185000000.0,"close_timestamp":1516187400000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03558718861209965,"open_date":"2018-01-17 12:30:00+00:00","close_date":"2018-01-17 22:05:00+00:00","open_rate":0.0281,"close_rate":0.02824085213032581,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":575,"profit_ratio":-0.0,"profit_abs":0.0001408521303258095,"exit_reason":"roi","initial_stop_loss_abs":0.02529,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.02529,"stop_loss_ratio":0.1,"min_rate":0.0281,"max_rate":0.02824085213032581,"is_open":false,"buy_tag":null,"open_timestamp":1516192200000.0,"close_timestamp":1516226700000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011559355963546878,"open_date":"2018-01-17 12:35:00+00:00","close_date":"2018-01-17 16:55:00+00:00","open_rate":0.08651001,"close_rate":0.08694364413533832,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":-0.0,"profit_abs":0.00043363413533832607,"exit_reason":"roi","initial_stop_loss_abs":0.077859009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077859009,"stop_loss_ratio":0.1,"min_rate":0.08651001,"max_rate":0.08694364413533832,"is_open":false,"buy_tag":null,"open_timestamp":1516192500000.0,"close_timestamp":1516208100000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.752529735487308,"open_date":"2018-01-18 05:00:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":5.633e-05,"close_rate":5.6612355889724306e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":2.8235588972430847e-07,"exit_reason":"roi","initial_stop_loss_abs":5.0697e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0697e-05,"stop_loss_ratio":0.1,"min_rate":5.633e-05,"max_rate":5.6612355889724306e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516251600000.0,"close_timestamp":1516254900000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01430923457900944,"open_date":"2018-01-18 05:20:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":0.06988494,"close_rate":0.07093584135338346,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":0.0010509013533834544,"exit_reason":"roi","initial_stop_loss_abs":0.06289644600000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06289644600000001,"stop_loss_ratio":0.1,"min_rate":0.06988494,"max_rate":0.07093584135338346,"is_open":false,"buy_tag":null,"open_timestamp":1516252800000.0,"close_timestamp":1516254900000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.034265103697024,"open_date":"2018-01-18 07:35:00+00:00","close_date":"2018-01-18 
08:15:00+00:00","open_rate":5.545e-05,"close_rate":5.572794486215538e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":2.779448621553787e-07,"exit_reason":"roi","initial_stop_loss_abs":4.9905e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.9905e-05,"stop_loss_ratio":0.1,"min_rate":5.545e-05,"max_rate":5.572794486215538e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516260900000.0,"close_timestamp":1516263300000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06121723118136401,"open_date":"2018-01-18 09:00:00+00:00","close_date":"2018-01-18 09:40:00+00:00","open_rate":0.01633527,"close_rate":0.016417151052631574,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":8.188105263157511e-05,"exit_reason":"roi","initial_stop_loss_abs":0.014701743,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014701743,"stop_loss_ratio":0.1,"min_rate":0.01633527,"max_rate":0.016417151052631574,"is_open":false,"buy_tag":null,"open_timestamp":1516266000000.0,"close_timestamp":1516268400000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3707356136045141,"open_date":"2018-01-18 16:40:00+00:00","close_date":"2018-01-18 17:20:00+00:00","open_rate":0.00269734,"close_rate":0.002710860501253133,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":1.3520501253133123e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002427606,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002427606,"stop_loss_ratio":0.1,"min_rate":0.00269734,"max_rate":0.002710860501253133,"is_open":false,"buy_tag":null,"open_timestamp":1516293600000.0,"close_timestamp":1516296000000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":22.346368715083802,"open_date":"2018-01-18 18:05:00+00:00","close_date":"2018-01-18 18:30:00+00:00","open_rate":4.475e-05,"close_rate":4.587155388471177e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":1.1215538847117757e-06,"exit_reason":"roi","initial_stop_loss_abs":4.0274999999999996e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.0274999999999996e-05,"stop_loss_ratio":0.1,"min_rate":4.475e-05,"max_rate":4.587155388471177e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516298700000.0,"close_timestamp":1516300200000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":35.842293906810035,"open_date":"2018-01-18 18:25:00+00:00","close_date":"2018-01-18 18:55:00+00:00","open_rate":2.79e-05,"close_rate":2.8319548872180444e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":4.1954887218044365e-07,"exit_reason":"roi","initial_stop_loss_abs":2.5110000000000002e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.5110000000000002e-05,"stop_loss_ratio":0.1,"min_rate":2.79e-05,"max_rate":2.8319548872180444e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516299900000.0,"close_timestamp":1516301700000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022525942001105574,"open_date":"2018-01-18 20:10:00+00:00","close_date":"2018-01-18 
20:50:00+00:00","open_rate":0.04439326,"close_rate":0.04461578260651629,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":0.00022252260651629135,"exit_reason":"roi","initial_stop_loss_abs":0.039953933999999997,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039953933999999997,"stop_loss_ratio":0.1,"min_rate":0.04439326,"max_rate":0.04461578260651629,"is_open":false,"buy_tag":null,"open_timestamp":1516306200000.0,"close_timestamp":1516308600000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":22.271714922048996,"open_date":"2018-01-18 21:30:00+00:00","close_date":"2018-01-19 00:35:00+00:00","open_rate":4.49e-05,"close_rate":4.51250626566416e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":185,"profit_ratio":0.0,"profit_abs":2.2506265664159932e-07,"exit_reason":"roi","initial_stop_loss_abs":4.041e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.041e-05,"stop_loss_ratio":0.1,"min_rate":4.49e-05,"max_rate":4.51250626566416e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516311000000.0,"close_timestamp":1516322100000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03502626970227671,"open_date":"2018-01-18 21:55:00+00:00","close_date":"2018-01-19 05:05:00+00:00","open_rate":0.02855,"close_rate":0.028693107769423555,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":430,"profit_ratio":-0.0,"profit_abs":0.00014310776942355607,"exit_reason":"roi","initial_stop_loss_abs":0.025695,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025695,"stop_loss_ratio":0.1,"min_rate":0.02855,"max_rate":0.028693107769423555,"is_open":false,"buy_tag":null,"open_timestamp":1516312500000.0,"close_timestamp":1516338300000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.25327812284334,"open_date":"2018-01-18 22:10:00+00:00","close_date":"2018-01-18 22:50:00+00:00","open_rate":5.796e-05,"close_rate":5.8250526315789473e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":2.905263157894727e-07,"exit_reason":"roi","initial_stop_loss_abs":5.2164000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2164000000000004e-05,"stop_loss_ratio":0.1,"min_rate":5.796e-05,"max_rate":5.8250526315789473e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516313400000.0,"close_timestamp":1516315800000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02303975994413319,"open_date":"2018-01-18 23:50:00+00:00","close_date":"2018-01-19 00:30:00+00:00","open_rate":0.04340323,"close_rate":0.04362079005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":0.0002175600501253122,"exit_reason":"roi","initial_stop_loss_abs":0.039062907,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039062907,"stop_loss_ratio":0.1,"min_rate":0.04340323,"max_rate":0.04362079005012531,"is_open":false,"buy_tag":null,"open_timestamp":1516319400000.0,"close_timestamp":1516321800000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02244943545282195,"open_date":"2018-01-19 16:45:00+00:00","close_date":"2018-01-19 
17:35:00+00:00","open_rate":0.04454455,"close_rate":0.04476783095238095,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":0.0002232809523809512,"exit_reason":"roi","initial_stop_loss_abs":0.040090095000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040090095000000006,"stop_loss_ratio":0.1,"min_rate":0.04454455,"max_rate":0.04476783095238095,"is_open":false,"buy_tag":null,"open_timestamp":1516380300000.0,"close_timestamp":1516383300000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.793594306049823,"open_date":"2018-01-19 17:15:00+00:00","close_date":"2018-01-19 19:55:00+00:00","open_rate":5.62e-05,"close_rate":5.648170426065162e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":160,"profit_ratio":-0.0,"profit_abs":2.817042606516199e-07,"exit_reason":"roi","initial_stop_loss_abs":5.058e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.058e-05,"stop_loss_ratio":0.1,"min_rate":5.62e-05,"max_rate":5.648170426065162e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516382100000.0,"close_timestamp":1516391700000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.046784973496194,"open_date":"2018-01-19 17:20:00+00:00","close_date":"2018-01-19 20:15:00+00:00","open_rate":4.339e-05,"close_rate":4.360749373433584e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":2.174937343358337e-07,"exit_reason":"roi","initial_stop_loss_abs":3.9051e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.9051e-05,"stop_loss_ratio":0.1,"min_rate":4.339e-05,"max_rate":4.360749373433584e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516382400000.0,"close_timestamp":1516392900000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":9.910802775024777,"open_date":"2018-01-20 04:45:00+00:00","close_date":"2018-01-20 17:35:00+00:00","open_rate":0.0001009,"close_rate":0.00010140576441102755,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":770,"profit_ratio":0.0,"profit_abs":5.057644110275549e-07,"exit_reason":"roi","initial_stop_loss_abs":9.081e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.081e-05,"stop_loss_ratio":0.1,"min_rate":0.0001009,"max_rate":0.00010140576441102755,"is_open":false,"buy_tag":null,"open_timestamp":1516423500000.0,"close_timestamp":1516469700000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3696789338459548,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 15:15:00+00:00","open_rate":0.00270505,"close_rate":0.002718609147869674,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":625,"profit_ratio":-0.0,"profit_abs":1.3559147869673764e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002434545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002434545,"stop_loss_ratio":0.1,"min_rate":0.00270505,"max_rate":0.002718609147869674,"is_open":false,"buy_tag":null,"open_timestamp":1516423800000.0,"close_timestamp":1516461300000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.033333311111125925,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 
07:00:00+00:00","open_rate":0.03000002,"close_rate":0.030150396040100245,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":0.00015037604010024672,"exit_reason":"roi","initial_stop_loss_abs":0.027000018,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027000018,"stop_loss_ratio":0.1,"min_rate":0.03000002,"max_rate":0.030150396040100245,"is_open":false,"buy_tag":null,"open_timestamp":1516423800000.0,"close_timestamp":1516431600000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.315018315018317,"open_date":"2018-01-20 09:00:00+00:00","close_date":"2018-01-20 09:40:00+00:00","open_rate":5.46e-05,"close_rate":5.4873684210526304e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":2.736842105263053e-07,"exit_reason":"roi","initial_stop_loss_abs":4.914e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.914e-05,"stop_loss_ratio":0.1,"min_rate":5.46e-05,"max_rate":5.4873684210526304e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516438800000.0,"close_timestamp":1516441200000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03244412634781012,"open_date":"2018-01-20 18:25:00+00:00","close_date":"2018-01-25 03:50:00+00:00","open_rate":0.03082222,"close_rate":0.027739998,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":6325,"profit_ratio":-0.10448878,"profit_abs":-0.0030822220000000025,"exit_reason":"stop_loss","initial_stop_loss_abs":0.027739998000000002,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027739998000000002,"stop_loss_ratio":0.1,"min_rate":0.027739998,"max_rate":0.03082222,"is_open":false,"buy_tag":null,"open_timestamp":1516472700000.0,"close_timestamp":1516852200000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011148273260677065,"open_date":"2018-01-20 22:25:00+00:00","close_date":"2018-01-20 23:15:00+00:00","open_rate":0.08969999,"close_rate":0.09014961401002504,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":0.00044962401002504593,"exit_reason":"roi","initial_stop_loss_abs":0.080729991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080729991,"stop_loss_ratio":0.1,"min_rate":0.08969999,"max_rate":0.09014961401002504,"is_open":false,"buy_tag":null,"open_timestamp":1516487100000.0,"close_timestamp":1516490100000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06125570520324337,"open_date":"2018-01-21 02:50:00+00:00","close_date":"2018-01-21 14:30:00+00:00","open_rate":0.01632501,"close_rate":0.01640683962406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":700,"profit_ratio":0.0,"profit_abs":8.182962406014932e-05,"exit_reason":"roi","initial_stop_loss_abs":0.014692509000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014692509000000001,"stop_loss_ratio":0.1,"min_rate":0.01632501,"max_rate":0.01640683962406015,"is_open":false,"buy_tag":null,"open_timestamp":1516503000000.0,"close_timestamp":1516545000000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01417675579120474,"open_date":"2018-01-21 10:20:00+00:00","close_date":"2018-01-21 
11:00:00+00:00","open_rate":0.070538,"close_rate":0.07089157393483708,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00035357393483707866,"exit_reason":"roi","initial_stop_loss_abs":0.0634842,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0634842,"stop_loss_ratio":0.1,"min_rate":0.070538,"max_rate":0.07089157393483708,"is_open":false,"buy_tag":null,"open_timestamp":1516530000000.0,"close_timestamp":1516532400000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.864365214110546,"open_date":"2018-01-21 15:50:00+00:00","close_date":"2018-01-21 18:45:00+00:00","open_rate":5.301e-05,"close_rate":5.327571428571427e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":2.657142857142672e-07,"exit_reason":"roi","initial_stop_loss_abs":4.7709e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7709e-05,"stop_loss_ratio":0.1,"min_rate":5.301e-05,"max_rate":5.327571428571427e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516549800000.0,"close_timestamp":1516560300000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.284450063211125,"open_date":"2018-01-21 16:20:00+00:00","close_date":"2018-01-21 17:00:00+00:00","open_rate":3.955e-05,"close_rate":3.9748245614035085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":1.9824561403508552e-07,"exit_reason":"roi","initial_stop_loss_abs":3.5595e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5595e-05,"stop_loss_ratio":0.1,"min_rate":3.955e-05,"max_rate":3.9748245614035085e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516551600000.0,"close_timestamp":1516554000000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38683971296493297,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:45:00+00:00","open_rate":0.00258505,"close_rate":0.002623922932330827,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":3.8872932330826816e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002326545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326545,"stop_loss_ratio":0.1,"min_rate":0.00258505,"max_rate":0.002623922932330827,"is_open":false,"buy_tag":null,"open_timestamp":1516569300000.0,"close_timestamp":1516571100000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.621316935690498,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:55:00+00:00","open_rate":3.903e-05,"close_rate":3.922563909774435e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":1.9563909774435151e-07,"exit_reason":"roi","initial_stop_loss_abs":3.5127e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5127e-05,"stop_loss_ratio":0.1,"min_rate":3.903e-05,"max_rate":3.922563909774435e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516569300000.0,"close_timestamp":1516571700000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.098548510313215,"open_date":"2018-01-22 00:35:00+00:00","close_date":"2018-01-22 
10:35:00+00:00","open_rate":5.236e-05,"close_rate":5.262245614035087e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":600,"profit_ratio":-0.0,"profit_abs":2.624561403508717e-07,"exit_reason":"roi","initial_stop_loss_abs":4.7124e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7124e-05,"stop_loss_ratio":0.1,"min_rate":5.236e-05,"max_rate":5.262245614035087e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516581300000.0,"close_timestamp":1516617300000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.076650420912717,"open_date":"2018-01-22 01:30:00+00:00","close_date":"2018-01-22 02:10:00+00:00","open_rate":9.028e-05,"close_rate":9.07325313283208e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":4.5253132832080657e-07,"exit_reason":"roi","initial_stop_loss_abs":8.1252e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.1252e-05,"stop_loss_ratio":0.1,"min_rate":9.028e-05,"max_rate":9.07325313283208e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516584600000.0,"close_timestamp":1516587000000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3721622627465575,"open_date":"2018-01-22 12:25:00+00:00","close_date":"2018-01-22 14:35:00+00:00","open_rate":0.002687,"close_rate":0.002700468671679198,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":1.3468671679197925e-05,"exit_reason":"roi","initial_stop_loss_abs":0.0024183000000000004,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0024183000000000004,"stop_loss_ratio":0.1,"min_rate":0.002687,"max_rate":0.002700468671679198,"is_open":false,"buy_tag":null,"open_timestamp":1516623900000.0,"close_timestamp":1516631700000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.99232245681382,"open_date":"2018-01-22 13:15:00+00:00","close_date":"2018-01-22 13:55:00+00:00","open_rate":4.168e-05,"close_rate":4.188892230576441e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":2.0892230576441054e-07,"exit_reason":"roi","initial_stop_loss_abs":3.7512e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.7512e-05,"stop_loss_ratio":0.1,"min_rate":4.168e-05,"max_rate":4.188892230576441e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516626900000.0,"close_timestamp":1516629300000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.336583153837434,"open_date":"2018-01-22 14:00:00+00:00","close_date":"2018-01-22 14:30:00+00:00","open_rate":8.821e-05,"close_rate":8.953646616541353e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.326466165413529e-06,"exit_reason":"roi","initial_stop_loss_abs":7.9389e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.9389e-05,"stop_loss_ratio":0.1,"min_rate":8.821e-05,"max_rate":8.953646616541353e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516629600000.0,"close_timestamp":1516631400000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.334880123743233,"open_date":"2018-01-22 15:55:00+00:00","close_date":"2018-01-22 
16:40:00+00:00","open_rate":5.172e-05,"close_rate":5.1979248120300745e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":2.592481203007459e-07,"exit_reason":"roi","initial_stop_loss_abs":4.6548e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6548e-05,"stop_loss_ratio":0.1,"min_rate":5.172e-05,"max_rate":5.1979248120300745e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516636500000.0,"close_timestamp":1516639200000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":33.04692663582287,"open_date":"2018-01-22 16:05:00+00:00","close_date":"2018-01-22 16:25:00+00:00","open_rate":3.026e-05,"close_rate":3.101839598997494e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":7.5839598997494e-07,"exit_reason":"roi","initial_stop_loss_abs":2.7234e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7234e-05,"stop_loss_ratio":0.1,"min_rate":3.026e-05,"max_rate":3.101839598997494e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516637100000.0,"close_timestamp":1516638300000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014156285390713478,"open_date":"2018-01-22 19:50:00+00:00","close_date":"2018-01-23 00:10:00+00:00","open_rate":0.07064,"close_rate":0.07099408521303258,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":0.0,"profit_abs":0.00035408521303258167,"exit_reason":"roi","initial_stop_loss_abs":0.063576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.063576,"stop_loss_ratio":0.1,"min_rate":0.07064,"max_rate":0.07099408521303258,"is_open":false,"buy_tag":null,"open_timestamp":1516650600000.0,"close_timestamp":1516666200000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06080938507725528,"open_date":"2018-01-22 21:25:00+00:00","close_date":"2018-01-22 22:05:00+00:00","open_rate":0.01644483,"close_rate":0.01652726022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":8.243022556390922e-05,"exit_reason":"roi","initial_stop_loss_abs":0.014800347,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014800347,"stop_loss_ratio":0.1,"min_rate":0.01644483,"max_rate":0.01652726022556391,"is_open":false,"buy_tag":null,"open_timestamp":1516656300000.0,"close_timestamp":1516658700000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.08935580697299,"open_date":"2018-01-23 00:05:00+00:00","close_date":"2018-01-23 00:35:00+00:00","open_rate":4.331e-05,"close_rate":4.3961278195488714e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":6.512781954887175e-07,"exit_reason":"roi","initial_stop_loss_abs":3.8979e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.8979e-05,"stop_loss_ratio":0.1,"min_rate":4.331e-05,"max_rate":4.3961278195488714e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516665900000.0,"close_timestamp":1516667700000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.250000000000004,"open_date":"2018-01-23 01:50:00+00:00","close_date":"2018-01-23 
02:15:00+00:00","open_rate":3.2e-05,"close_rate":3.2802005012531326e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":8.020050125313278e-07,"exit_reason":"roi","initial_stop_loss_abs":2.88e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.88e-05,"stop_loss_ratio":0.1,"min_rate":3.2e-05,"max_rate":3.2802005012531326e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516672200000.0,"close_timestamp":1516673700000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010907854156754155,"open_date":"2018-01-23 04:25:00+00:00","close_date":"2018-01-23 05:15:00+00:00","open_rate":0.09167706,"close_rate":0.09213659413533835,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":0.0004595341353383492,"exit_reason":"roi","initial_stop_loss_abs":0.08250935400000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08250935400000001,"stop_loss_ratio":0.1,"min_rate":0.09167706,"max_rate":0.09213659413533835,"is_open":false,"buy_tag":null,"open_timestamp":1516681500000.0,"close_timestamp":1516684500000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014440474918339115,"open_date":"2018-01-23 07:35:00+00:00","close_date":"2018-01-23 09:00:00+00:00","open_rate":0.0692498,"close_rate":0.06959691679197995,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":0.0,"profit_abs":0.0003471167919799484,"exit_reason":"roi","initial_stop_loss_abs":0.06232482,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06232482,"stop_loss_ratio":0.1,"min_rate":0.0692498,"max_rate":0.06959691679197995,"is_open":false,"buy_tag":null,"open_timestamp":1516692900000.0,"close_timestamp":1516698000000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.426775612822127,"open_date":"2018-01-23 10:50:00+00:00","close_date":"2018-01-23 13:05:00+00:00","open_rate":3.182e-05,"close_rate":3.197949874686716e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":135,"profit_ratio":0.0,"profit_abs":1.594987468671663e-07,"exit_reason":"roi","initial_stop_loss_abs":2.8638e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8638e-05,"stop_loss_ratio":0.1,"min_rate":3.182e-05,"max_rate":3.197949874686716e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516704600000.0,"close_timestamp":1516712700000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024461839530332683,"open_date":"2018-01-23 11:05:00+00:00","close_date":"2018-01-23 16:05:00+00:00","open_rate":0.04088,"close_rate":0.04108491228070175,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":300,"profit_ratio":-0.0,"profit_abs":0.0002049122807017481,"exit_reason":"roi","initial_stop_loss_abs":0.036792,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036792,"stop_loss_ratio":0.1,"min_rate":0.04088,"max_rate":0.04108491228070175,"is_open":false,"buy_tag":null,"open_timestamp":1516705500000.0,"close_timestamp":1516723500000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.417475728155342,"open_date":"2018-01-23 14:55:00+00:00","close_date":"2018-01-23 
15:35:00+00:00","open_rate":5.15e-05,"close_rate":5.175814536340851e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":2.5814536340851513e-07,"exit_reason":"roi","initial_stop_loss_abs":4.635e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.635e-05,"stop_loss_ratio":0.1,"min_rate":5.15e-05,"max_rate":5.175814536340851e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516719300000.0,"close_timestamp":1516721700000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011023294646713328,"open_date":"2018-01-23 16:35:00+00:00","close_date":"2018-01-24 00:05:00+00:00","open_rate":0.09071698,"close_rate":0.09117170170426064,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":450,"profit_ratio":0.0,"profit_abs":0.00045472170426064107,"exit_reason":"roi","initial_stop_loss_abs":0.081645282,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.081645282,"stop_loss_ratio":0.1,"min_rate":0.09071698,"max_rate":0.09117170170426064,"is_open":false,"buy_tag":null,"open_timestamp":1516725300000.0,"close_timestamp":1516752300000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.969309462915604,"open_date":"2018-01-23 17:25:00+00:00","close_date":"2018-01-23 18:45:00+00:00","open_rate":3.128e-05,"close_rate":3.1436791979949865e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":1.5679197994986587e-07,"exit_reason":"roi","initial_stop_loss_abs":2.8152e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8152e-05,"stop_loss_ratio":0.1,"min_rate":3.128e-05,"max_rate":3.1436791979949865e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516728300000.0,"close_timestamp":1516733100000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.465724751439037,"open_date":"2018-01-23 20:15:00+00:00","close_date":"2018-01-23 22:00:00+00:00","open_rate":9.555e-05,"close_rate":9.602894736842104e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":4.789473684210343e-07,"exit_reason":"roi","initial_stop_loss_abs":8.5995e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.5995e-05,"stop_loss_ratio":0.1,"min_rate":9.555e-05,"max_rate":9.602894736842104e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516738500000.0,"close_timestamp":1516744800000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02450979791426522,"open_date":"2018-01-23 22:30:00+00:00","close_date":"2018-01-23 23:10:00+00:00","open_rate":0.04080001,"close_rate":0.0410045213283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00020451132832080554,"exit_reason":"roi","initial_stop_loss_abs":0.036720009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036720009,"stop_loss_ratio":0.1,"min_rate":0.04080001,"max_rate":0.0410045213283208,"is_open":false,"buy_tag":null,"open_timestamp":1516746600000.0,"close_timestamp":1516749000000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.368584156498162,"open_date":"2018-01-23 23:50:00+00:00","close_date":"2018-01-24 
03:35:00+00:00","open_rate":5.163e-05,"close_rate":5.18887969924812e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":225,"profit_ratio":-0.0,"profit_abs":2.587969924812037e-07,"exit_reason":"roi","initial_stop_loss_abs":4.6467e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6467e-05,"stop_loss_ratio":0.1,"min_rate":5.163e-05,"max_rate":5.18887969924812e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516751400000.0,"close_timestamp":1516764900000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024747691102289384,"open_date":"2018-01-24 00:20:00+00:00","close_date":"2018-01-24 01:50:00+00:00","open_rate":0.04040781,"close_rate":0.04061035541353383,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":0.0,"profit_abs":0.0002025454135338306,"exit_reason":"roi","initial_stop_loss_abs":0.036367029,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036367029,"stop_loss_ratio":0.1,"min_rate":0.04040781,"max_rate":0.04061035541353383,"is_open":false,"buy_tag":null,"open_timestamp":1516753200000.0,"close_timestamp":1516758600000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.485580670303975,"open_date":"2018-01-24 06:45:00+00:00","close_date":"2018-01-24 07:25:00+00:00","open_rate":5.132e-05,"close_rate":5.157724310776942e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":2.5724310776941724e-07,"exit_reason":"roi","initial_stop_loss_abs":4.6188000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6188000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.132e-05,"max_rate":5.157724310776942e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516776300000.0,"close_timestamp":1516778700000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.23816852635629,"open_date":"2018-01-24 14:15:00+00:00","close_date":"2018-01-24 14:25:00+00:00","open_rate":5.198e-05,"close_rate":5.432496240601503e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":2.344962406015033e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6782e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6782e-05,"stop_loss_ratio":0.1,"min_rate":5.198e-05,"max_rate":5.432496240601503e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516803300000.0,"close_timestamp":1516803900000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":32.74394237066143,"open_date":"2018-01-24 14:50:00+00:00","close_date":"2018-01-24 16:35:00+00:00","open_rate":3.054e-05,"close_rate":3.069308270676692e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":1.5308270676691466e-07,"exit_reason":"roi","initial_stop_loss_abs":2.7486000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7486000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.054e-05,"max_rate":3.069308270676692e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516805400000.0,"close_timestamp":1516811700000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.795638562020944,"open_date":"2018-01-24 15:10:00+00:00","close_date":"2018-01-24 
16:15:00+00:00","open_rate":9.263e-05,"close_rate":9.309431077694236e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":0.0,"profit_abs":4.6431077694236234e-07,"exit_reason":"roi","initial_stop_loss_abs":8.3367e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.3367e-05,"stop_loss_ratio":0.1,"min_rate":9.263e-05,"max_rate":9.309431077694236e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516806600000.0,"close_timestamp":1516810500000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.13565469713457,"open_date":"2018-01-24 22:40:00+00:00","close_date":"2018-01-24 23:25:00+00:00","open_rate":5.514e-05,"close_rate":5.54163909774436e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":2.7639097744360576e-07,"exit_reason":"roi","initial_stop_loss_abs":4.9625999999999995e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.9625999999999995e-05,"stop_loss_ratio":0.1,"min_rate":5.514e-05,"max_rate":5.54163909774436e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516833600000.0,"close_timestamp":1516836300000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":20.3210729526519,"open_date":"2018-01-25 00:50:00+00:00","close_date":"2018-01-25 01:30:00+00:00","open_rate":4.921e-05,"close_rate":4.9456666666666664e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":2.4666666666666543e-07,"exit_reason":"roi","initial_stop_loss_abs":4.4289e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4289e-05,"stop_loss_ratio":0.1,"min_rate":4.921e-05,"max_rate":4.9456666666666664e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516841400000.0,"close_timestamp":1516843800000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38461538461538464,"open_date":"2018-01-25 08:15:00+00:00","close_date":"2018-01-25 12:15:00+00:00","open_rate":0.0026,"close_rate":0.002613032581453634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":240,"profit_ratio":0.0,"profit_abs":1.3032581453634e-05,"exit_reason":"roi","initial_stop_loss_abs":0.00234,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00234,"stop_loss_ratio":0.1,"min_rate":0.0026,"max_rate":0.002613032581453634,"is_open":false,"buy_tag":null,"open_timestamp":1516868100000.0,"close_timestamp":1516882500000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03571593119825878,"open_date":"2018-01-25 10:25:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":0.02799871,"close_rate":0.028139054411027563,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":350,"profit_ratio":-0.0,"profit_abs":0.00014034441102756326,"exit_reason":"roi","initial_stop_loss_abs":0.025198839,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025198839,"stop_loss_ratio":0.1,"min_rate":0.02799871,"max_rate":0.028139054411027563,"is_open":false,"buy_tag":null,"open_timestamp":1516875900000.0,"close_timestamp":1516896900000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024516401717913302,"open_date":"2018-01-25 11:00:00+00:00","close_date":"2018-01-25 
11:45:00+00:00","open_rate":0.04078902,"close_rate":0.0409934762406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":0.00020445624060149575,"exit_reason":"roi","initial_stop_loss_abs":0.036710118,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036710118,"stop_loss_ratio":0.1,"min_rate":0.04078902,"max_rate":0.0409934762406015,"is_open":false,"buy_tag":null,"open_timestamp":1516878000000.0,"close_timestamp":1516880700000.0},{"pair":"NXT/BTC","stake_amount":0.001,"amount":34.602076124567475,"open_date":"2018-01-25 13:05:00+00:00","close_date":"2018-01-25 13:45:00+00:00","open_rate":2.89e-05,"close_rate":2.904486215538847e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":1.4486215538846723e-07,"exit_reason":"roi","initial_stop_loss_abs":2.601e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.601e-05,"stop_loss_ratio":0.1,"min_rate":2.89e-05,"max_rate":2.904486215538847e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516885500000.0,"close_timestamp":1516887900000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02432912439481303,"open_date":"2018-01-25 13:20:00+00:00","close_date":"2018-01-25 14:05:00+00:00","open_rate":0.041103,"close_rate":0.04130903007518797,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":0.00020603007518796984,"exit_reason":"roi","initial_stop_loss_abs":0.0369927,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0369927,"stop_loss_ratio":0.1,"min_rate":0.041103,"max_rate":0.04130903007518797,"is_open":false,"buy_tag":null,"open_timestamp":1516886400000.0,"close_timestamp":1516889100000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.422991893883566,"open_date":"2018-01-25 15:45:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":5.428e-05,"close_rate":5.509624060150376e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":8.162406015037611e-07,"exit_reason":"roi","initial_stop_loss_abs":4.8852000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8852000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.428e-05,"max_rate":5.509624060150376e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516895100000.0,"close_timestamp":1516896900000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.47063169560399,"open_date":"2018-01-25 17:45:00+00:00","close_date":"2018-01-25 23:15:00+00:00","open_rate":5.414e-05,"close_rate":5.441137844611528e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":330,"profit_ratio":-0.0,"profit_abs":2.713784461152774e-07,"exit_reason":"roi","initial_stop_loss_abs":4.8726e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8726e-05,"stop_loss_ratio":0.1,"min_rate":5.414e-05,"max_rate":5.441137844611528e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516902300000.0,"close_timestamp":1516922100000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024150056861308878,"open_date":"2018-01-25 21:15:00+00:00","close_date":"2018-01-25 
21:55:00+00:00","open_rate":0.04140777,"close_rate":0.0416153277443609,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0002075577443608964,"exit_reason":"roi","initial_stop_loss_abs":0.037266993000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.037266993000000005,"stop_loss_ratio":0.1,"min_rate":0.04140777,"max_rate":0.0416153277443609,"is_open":false,"buy_tag":null,"open_timestamp":1516914900000.0,"close_timestamp":1516917300000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3932224183965176,"open_date":"2018-01-26 02:05:00+00:00","close_date":"2018-01-26 02:45:00+00:00","open_rate":0.00254309,"close_rate":0.002555837318295739,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":1.2747318295739177e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002288781,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002288781,"stop_loss_ratio":0.1,"min_rate":0.00254309,"max_rate":0.002555837318295739,"is_open":false,"buy_tag":null,"open_timestamp":1516932300000.0,"close_timestamp":1516934700000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.834849295523455,"open_date":"2018-01-26 02:55:00+00:00","close_date":"2018-01-26 15:10:00+00:00","open_rate":5.607e-05,"close_rate":5.6351052631578935e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":735,"profit_ratio":-0.0,"profit_abs":2.810526315789381e-07,"exit_reason":"roi","initial_stop_loss_abs":5.0463e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0463e-05,"stop_loss_ratio":0.1,"min_rate":5.607e-05,"max_rate":5.6351052631578935e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516935300000.0,"close_timestamp":1516979400000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.39400171784748983,"open_date":"2018-01-26 06:10:00+00:00","close_date":"2018-01-26 09:25:00+00:00","open_rate":0.00253806,"close_rate":0.0025507821052631577,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":195,"profit_ratio":0.0,"profit_abs":1.2722105263157733e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002284254,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002284254,"stop_loss_ratio":0.1,"min_rate":0.00253806,"max_rate":0.0025507821052631577,"is_open":false,"buy_tag":null,"open_timestamp":1516947000000.0,"close_timestamp":1516958700000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024096385542168672,"open_date":"2018-01-26 07:25:00+00:00","close_date":"2018-01-26 09:55:00+00:00","open_rate":0.0415,"close_rate":0.04170802005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":150,"profit_ratio":-0.0,"profit_abs":0.00020802005012530989,"exit_reason":"roi","initial_stop_loss_abs":0.03735,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03735,"stop_loss_ratio":0.1,"min_rate":0.0415,"max_rate":0.04170802005012531,"is_open":false,"buy_tag":null,"open_timestamp":1516951500000.0,"close_timestamp":1516960500000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.793459875963165,"open_date":"2018-01-26 09:55:00+00:00","close_date":"2018-01-26 
10:25:00+00:00","open_rate":5.321e-05,"close_rate":5.401015037593984e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":8.00150375939842e-07,"exit_reason":"roi","initial_stop_loss_abs":4.7889e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7889e-05,"stop_loss_ratio":0.1,"min_rate":5.321e-05,"max_rate":5.401015037593984e-05,"is_open":false,"buy_tag":null,"open_timestamp":1516960500000.0,"close_timestamp":1516962300000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.036074437437185386,"open_date":"2018-01-26 16:05:00+00:00","close_date":"2018-01-26 16:45:00+00:00","open_rate":0.02772046,"close_rate":0.02785940967418546,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00013894967418546025,"exit_reason":"roi","initial_stop_loss_abs":0.024948414,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024948414,"stop_loss_ratio":0.1,"min_rate":0.02772046,"max_rate":0.02785940967418546,"is_open":false,"buy_tag":null,"open_timestamp":1516982700000.0,"close_timestamp":1516985100000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010569326272036914,"open_date":"2018-01-26 23:35:00+00:00","close_date":"2018-01-27 00:15:00+00:00","open_rate":0.09461341,"close_rate":0.09508766268170424,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":0.00047425268170424306,"exit_reason":"roi","initial_stop_loss_abs":0.085152069,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085152069,"stop_loss_ratio":0.1,"min_rate":0.09461341,"max_rate":0.09508766268170424,"is_open":false,"buy_tag":null,"open_timestamp":1517009700000.0,"close_timestamp":1517012100000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":17.809439002671414,"open_date":"2018-01-27 00:35:00+00:00","close_date":"2018-01-27 01:30:00+00:00","open_rate":5.615e-05,"close_rate":5.643145363408521e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":2.814536340852038e-07,"exit_reason":"roi","initial_stop_loss_abs":5.0535e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0535e-05,"stop_loss_ratio":0.1,"min_rate":5.615e-05,"max_rate":5.643145363408521e-05,"is_open":false,"buy_tag":null,"open_timestamp":1517013300000.0,"close_timestamp":1517016600000.0},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.998560115190784,"open_date":"2018-01-27 00:45:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":5.556e-05,"close_rate":5.144e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4560,"profit_ratio":-0.07877175,"profit_abs":-4.120000000000001e-06,"exit_reason":"force_exit","initial_stop_loss_abs":5.0004000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0004000000000004e-05,"stop_loss_ratio":0.1,"min_rate":5.144e-05,"max_rate":5.556e-05,"is_open":false,"buy_tag":null,"open_timestamp":1517013900000.0,"close_timestamp":1517287500000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014492751522789634,"open_date":"2018-01-27 02:30:00+00:00","close_date":"2018-01-27 
11:25:00+00:00","open_rate":0.06900001,"close_rate":0.06934587471177944,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":535,"profit_ratio":-0.0,"profit_abs":0.0003458647117794422,"exit_reason":"roi","initial_stop_loss_abs":0.062100009000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.062100009000000005,"stop_loss_ratio":0.1,"min_rate":0.06900001,"max_rate":0.06934587471177944,"is_open":false,"buy_tag":null,"open_timestamp":1517020200000.0,"close_timestamp":1517052300000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010582027378879437,"open_date":"2018-01-27 06:25:00+00:00","close_date":"2018-01-27 07:05:00+00:00","open_rate":0.09449985,"close_rate":0.0949735334586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0004736834586466093,"exit_reason":"roi","initial_stop_loss_abs":0.085049865,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085049865,"stop_loss_ratio":0.1,"min_rate":0.09449985,"max_rate":0.0949735334586466,"is_open":false,"buy_tag":null,"open_timestamp":1517034300000.0,"close_timestamp":1517036700000.0},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02434885085598385,"open_date":"2018-01-27 09:40:00+00:00","close_date":"2018-01-30 04:40:00+00:00","open_rate":0.0410697,"close_rate":0.03928809,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4020,"profit_ratio":-0.04815133,"profit_abs":-0.001781610000000003,"exit_reason":"force_exit","initial_stop_loss_abs":0.03696273,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03696273,"stop_loss_ratio":0.1,"min_rate":0.03928809,"max_rate":0.0410697,"is_open":false,"buy_tag":null,"open_timestamp":1517046000000.0,"close_timestamp":1517287200000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03508771929824561,"open_date":"2018-01-27 11:45:00+00:00","close_date":"2018-01-27 12:30:00+00:00","open_rate":0.0285,"close_rate":0.02864285714285714,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":0.00014285714285713902,"exit_reason":"roi","initial_stop_loss_abs":0.025650000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025650000000000003,"stop_loss_ratio":0.1,"min_rate":0.0285,"max_rate":0.02864285714285714,"is_open":false,"buy_tag":null,"open_timestamp":1517053500000.0,"close_timestamp":1517056200000.0},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.034887307020861215,"open_date":"2018-01-27 12:35:00+00:00","close_date":"2018-01-27 15:25:00+00:00","open_rate":0.02866372,"close_rate":0.02880739779448621,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":170,"profit_ratio":-0.0,"profit_abs":0.00014367779448621124,"exit_reason":"roi","initial_stop_loss_abs":0.025797348,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025797348,"stop_loss_ratio":0.1,"min_rate":0.02866372,"max_rate":0.02880739779448621,"is_open":false,"buy_tag":null,"open_timestamp":1517056500000.0,"close_timestamp":1517066700000.0},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010484268355332824,"open_date":"2018-01-27 15:50:00+00:00","close_date":"2018-01-27 
16:50:00+00:00","open_rate":0.095381,"close_rate":0.09585910025062656,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":0.00047810025062657024,"exit_reason":"roi","initial_stop_loss_abs":0.0858429,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0858429,"stop_loss_ratio":0.1,"min_rate":0.095381,"max_rate":0.09585910025062656,"is_open":false,"buy_tag":null,"open_timestamp":1517068200000.0,"close_timestamp":1517071800000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014794886650455417,"open_date":"2018-01-27 17:05:00+00:00","close_date":"2018-01-27 17:45:00+00:00","open_rate":0.06759092,"close_rate":0.06792972160401002,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00033880160401002224,"exit_reason":"roi","initial_stop_loss_abs":0.060831828,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060831828,"stop_loss_ratio":0.1,"min_rate":0.06759092,"max_rate":0.06792972160401002,"is_open":false,"buy_tag":null,"open_timestamp":1517072700000.0,"close_timestamp":1517075100000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38684569885609726,"open_date":"2018-01-27 23:40:00+00:00","close_date":"2018-01-28 01:05:00+00:00","open_rate":0.00258501,"close_rate":0.002597967443609022,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":-0.0,"profit_abs":1.2957443609021985e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002326509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326509,"stop_loss_ratio":0.1,"min_rate":0.00258501,"max_rate":0.002597967443609022,"is_open":false,"buy_tag":null,"open_timestamp":1517096400000.0,"close_timestamp":1517101500000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014928710926711672,"open_date":"2018-01-28 02:25:00+00:00","close_date":"2018-01-28 08:10:00+00:00","open_rate":0.06698502,"close_rate":0.0673207845112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":345,"profit_ratio":-0.0,"profit_abs":0.00033576451127818874,"exit_reason":"roi","initial_stop_loss_abs":0.060286518000000004,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060286518000000004,"stop_loss_ratio":0.1,"min_rate":0.06698502,"max_rate":0.0673207845112782,"is_open":false,"buy_tag":null,"open_timestamp":1517106300000.0,"close_timestamp":1517127000000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014767187899175548,"open_date":"2018-01-28 10:25:00+00:00","close_date":"2018-01-28 16:30:00+00:00","open_rate":0.0677177,"close_rate":0.06805713709273183,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":365,"profit_ratio":-0.0,"profit_abs":0.0003394370927318202,"exit_reason":"roi","initial_stop_loss_abs":0.06094593000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06094593000000001,"stop_loss_ratio":0.1,"min_rate":0.0677177,"max_rate":0.06805713709273183,"is_open":false,"buy_tag":null,"open_timestamp":1517135100000.0,"close_timestamp":1517157000000.0},{"pair":"XLM/BTC","stake_amount":0.001,"amount":19.175455417066157,"open_date":"2018-01-28 20:35:00+00:00","close_date":"2018-01-28 
21:35:00+00:00","open_rate":5.215e-05,"close_rate":5.2411403508771925e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":0.0,"profit_abs":2.6140350877192417e-07,"exit_reason":"roi","initial_stop_loss_abs":4.6935000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6935000000000004e-05,"stop_loss_ratio":0.1,"min_rate":5.215e-05,"max_rate":5.2411403508771925e-05,"is_open":false,"buy_tag":null,"open_timestamp":1517171700000.0,"close_timestamp":1517175300000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.36521808998243305,"open_date":"2018-01-28 22:00:00+00:00","close_date":"2018-01-28 22:30:00+00:00","open_rate":0.00273809,"close_rate":0.002779264285714285,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":4.117428571428529e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002464281,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002464281,"stop_loss_ratio":0.1,"min_rate":0.00273809,"max_rate":0.002779264285714285,"is_open":false,"buy_tag":null,"open_timestamp":1517176800000.0,"close_timestamp":1517178600000.0},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3641236272539253,"open_date":"2018-01-29 00:00:00+00:00","close_date":"2018-01-29 00:30:00+00:00","open_rate":0.00274632,"close_rate":0.002787618045112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":4.129804511278194e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002471688,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002471688,"stop_loss_ratio":0.1,"min_rate":0.00274632,"max_rate":0.002787618045112782,"is_open":false,"buy_tag":null,"open_timestamp":1517184000000.0,"close_timestamp":1517185800000.0},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.061634117689115045,"open_date":"2018-01-29 02:15:00+00:00","close_date":"2018-01-29 03:00:00+00:00","open_rate":0.01622478,"close_rate":0.016306107218045113,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":8.132721804511231e-05,"exit_reason":"roi","initial_stop_loss_abs":0.014602302000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014602302000000001,"stop_loss_ratio":0.1,"min_rate":0.01622478,"max_rate":0.016306107218045113,"is_open":false,"buy_tag":null,"open_timestamp":1517192100000.0,"close_timestamp":1517194800000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014492753623188404,"open_date":"2018-01-29 03:05:00+00:00","close_date":"2018-01-29 03:45:00+00:00","open_rate":0.069,"close_rate":0.06934586466165413,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.00034586466165412166,"exit_reason":"roi","initial_stop_loss_abs":0.06210000000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06210000000000001,"stop_loss_ratio":0.1,"min_rate":0.069,"max_rate":0.06934586466165413,"is_open":false,"buy_tag":null,"open_timestamp":1517195100000.0,"close_timestamp":1517197500000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.42204454597373,"open_date":"2018-01-29 05:20:00+00:00","close_date":"2018-01-29 
06:55:00+00:00","open_rate":8.755e-05,"close_rate":8.798884711779448e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":95,"profit_ratio":-0.0,"profit_abs":4.3884711779447504e-07,"exit_reason":"roi","initial_stop_loss_abs":7.879500000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.879500000000001e-05,"stop_loss_ratio":0.1,"min_rate":8.755e-05,"max_rate":8.798884711779448e-05,"is_open":false,"buy_tag":null,"open_timestamp":1517203200000.0,"close_timestamp":1517208900000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014650376815016871,"open_date":"2018-01-29 07:00:00+00:00","close_date":"2018-01-29 19:25:00+00:00","open_rate":0.06825763,"close_rate":0.06859977350877192,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":745,"profit_ratio":-0.0,"profit_abs":0.00034214350877191657,"exit_reason":"roi","initial_stop_loss_abs":0.061431867,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061431867,"stop_loss_ratio":0.1,"min_rate":0.06825763,"max_rate":0.06859977350877192,"is_open":false,"buy_tag":null,"open_timestamp":1517209200000.0,"close_timestamp":1517253900000.0},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014894490408841846,"open_date":"2018-01-29 19:45:00+00:00","close_date":"2018-01-29 20:25:00+00:00","open_rate":0.06713892,"close_rate":0.06747545593984962,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":0.0003365359398496137,"exit_reason":"roi","initial_stop_loss_abs":0.060425028000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060425028000000006,"stop_loss_ratio":0.1,"min_rate":0.06713892,"max_rate":0.06747545593984962,"is_open":false,"buy_tag":null,"open_timestamp":1517255100000.0,"close_timestamp":1517257500000.0},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.193194537721066,"open_date":"2018-01-29 23:30:00+00:00","close_date":"2018-01-30 
04:45:00+00:00","open_rate":8.934e-05,"close_rate":8.8e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":315,"profit_ratio":-0.0199116,"profit_abs":-1.3399999999999973e-06,"exit_reason":"force_exit","initial_stop_loss_abs":8.0406e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.0406e-05,"stop_loss_ratio":0.1,"min_rate":8.8e-05,"max_rate":8.934e-05,"is_open":false,"buy_tag":null,"open_timestamp":1517268600000.0,"close_timestamp":1517287500000.0}],"locks":[],"best_pair":{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.0015944746365914707,"profit_total":0.15944746365914708,"profit_total_pct":15.94,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},"worst_pair":{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":0.0006671885263157366,"profit_total":0.06671885263157366,"profit_total_pct":6.67,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},"results_per_pair":[{"key":"ETH/BTC","trades":21,"profit_mean":0.0009500057142857142,"profit_mean_pct":0.09500057142857142,"profit_sum":0.01995012,"profit_sum_pct":2.0,"profit_total_abs":0.011505731278195264,"profit_total":1.1505731278195264,"profit_total_pct":115.06,"duration_avg":"2:17:00","wins":21,"draws":0,"losses":0},{"key":"DASH/BTC","trades":16,"profit_mean":0.0018703237499999997,"profit_mean_pct":0.18703237499999997,"profit_sum":0.029925179999999996,"profit_sum_pct":2.99,"profit_total_abs":0.007475052681704161,"profit_total":0.7475052681704161,"profit_total_pct":74.75,"duration_avg":"3:03:00","wins":16,"draws":0,"losses":0},{"key":"ZEC/BTC","trades":21,"profit_mean":-0.00039290904761904774,"profit_mean_pct":-0.03929090476190478,"profit_sum":-0.008251090000000003,"profit_sum_pct":-0.83,"profit_total_abs":0.004452605639097655,"profit_total":0.4452605639097655,"profit_total_pct":44.53,"duration_avg":"4:17:00","wins":20,"draws":0,"losses":1},{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.0015944746365914707,"profit_total":0.15944746365914708,"profit_total_pct":15.94,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":0.0006671885263157366,"profit_total":0.06671885263157366,"profit_total_pct":6.67,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},{"key":"ETC/BTC","trades":20,"profit_mean":0.0022568569999999997,"profit_mean_pct":0.22568569999999996,"profit_sum":0.04513713999999999,"profit_sum_pct":4.51,"profit_total_abs":0.00036538235338345404,"profit_total":0.0365382353383454,"profit_total_pct":3.65,"duration_avg":"1:45:00","wins":19,"draws":0,"losses":1},{"key":"TRX/BTC","trades":15,"profit_mean":0.0023467073333333323,"profit_mean_pct":0.23467073333333321,"profit_sum":0.035200609999999986,"profit_sum_pct":3.52,"profit_total_abs":1.1329523809523682e-05,"profit_total":0.0011329523809523682,"profit_total_pct":0.11,"duration_avg":"2:28:00","wins":13,"draws":0,"losses":2},{"key":"XLM/BTC","trades":21,"profit_mean":0.0026243899999999994,"profit_mean_pct":0.2624389999999999,"profit_sum":0.05511218999999999,"profit_sum_pct":5.51,"profit_total_abs":7.340779448621465e-06,"profit_total":0.0007340779448621465,"profit_total_
pct":0.07,"duration_avg":"3:21:00","wins":20,"draws":0,"losses":1},{"key":"ADA/BTC","trades":29,"profit_mean":-0.0011598141379310352,"profit_mean_pct":-0.11598141379310352,"profit_sum":-0.03363461000000002,"profit_sum_pct":-3.36,"profit_total_abs":4.916634085212862e-06,"profit_total":0.0004916634085212862,"profit_total_pct":0.05,"duration_avg":"5:35:00","wins":27,"draws":0,"losses":2},{"key":"NXT/BTC","trades":12,"profit_mean":-0.0012261025000000006,"profit_mean_pct":-0.12261025000000006,"profit_sum":-0.014713230000000008,"profit_sum_pct":-1.47,"profit_total_abs":1.4774411027568458e-06,"profit_total":0.00014774411027568458,"profit_total_pct":0.01,"duration_avg":"0:57:00","wins":11,"draws":0,"losses":1},{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.026085499493733857,"profit_total":2.6085499493733857,"profit_total_pct":260.85,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"results_per_enter_tag":[{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.026085499493733857,"profit_total":2.6085499493733857,"profit_total_pct":260.85,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"exit_reason_summary":[{"exit_reason":"roi","trades":170,"wins":170,"draws":0,"losses":0,"profit_mean":0.005398268352941177,"profit_mean_pct":0.54,"profit_sum":0.91770562,"profit_sum_pct":91.77,"profit_total_abs":0.031232837493733862,"profit_total":0.30590187333333335,"profit_total_pct":30.59},{"exit_reason":"stop_loss","trades":6,"wins":0,"draws":0,"losses":6,"profit_mean":-0.10448878000000002,"profit_mean_pct":-10.45,"profit_sum":-0.6269326800000001,"profit_sum_pct":-62.69,"profit_total_abs":-0.0033602680000000026,"profit_total":-0.20897756000000003,"profit_total_pct":-20.9},{"exit_reason":"force_exit","trades":3,"wins":0,"draws":0,"losses":3,"profit_mean":-0.04894489333333333,"profit_mean_pct":-4.89,"profit_sum":-0.14683468,"profit_sum_pct":-14.68,"profit_total_abs":-0.001787070000000003,"profit_total":-0.04894489333333333,"profit_total_pct":-4.89}],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":179,"total_volume":0.17900000000000005,"avg_stake_amount":0.0010000000000000002,"profit_mean":0.0008041243575418989,"profit_median":0.0,"profit_total":2.6085499493733857,"profit_total_abs":0.026085499493733857,"backtest_start":"2018-01-10 07:15:00","backtest_start_ts":1515568500000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":"2020-10-01 18:00:00+00:00","backtest_run_end_ts":"2020-10-01 
18:01:00+00:00","trades_per_day":9.42,"market_change":1.22,"pairlist":[],"stake_amount":0.001,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":0.01,"dry_run_wallet":0.01,"final_balance":0.03608549949373386,"rejected_signals":0,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timeframe_detail":"","timerange":"","enable_protections":false,"strategy_name":"StrategyTestV2","stoploss":0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":false,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.17955111999999998,"backtest_worst_day":-0.14683468,"backtest_best_day_abs":0.0071570099,"backtest_worst_day_abs":-0.0023093218,"winning_days":19,"draw_days":0,"losing_days":2,"daily_profit":[["2018-01-10",0.0025815306],["2018-01-11",0.0049356655],["2018-01-12",0.0006395218],["2018-01-13",0.0002574589],["2018-01-14",0.0010443828],["2018-01-15",0.0024030209],["2018-01-16",0.0071570099],["2018-01-17",0.001137038],["2018-01-18",0.0013712174],["2018-01-19",0.000584673],["2018-01-20",0.0006143386],["2018-01-21",0.0004749361],["2018-01-22",9.91669e-05],["2018-01-23",0.0015726664],["2018-01-24",0.0006610219],["2018-01-25",-0.0023093218],["2018-01-26",0.0003735204],["2018-01-27",0.0023975191],["2018-01-28",0.0007295947],["2018-01-29",0.0011476082],["2018-01-30",-0.00178707]],"wins":48,"losses":9,"draws":122,"holding_avg":"3:40:00","holding_avg_s":13200.0,"winner_holding_avg":"0:24:00","winner_holding_avg_s":1440.0,"loser_holding_avg":"1 day, 5:57:00","loser_holding_avg_s":107820.0,"max_drawdown":0.21142322000000008,"max_drawdown_account":0.08674033488183289,"max_drawdown_abs":0.0030822220000000025,"drawdown_start":"2018-01-25 01:30:00","drawdown_start_ts":1516843800000.0,"drawdown_end":"2018-01-25 03:50:00","drawdown_end_ts":1516852200000.0,"max_drawdown_low":0.02245167355388436,"max_drawdown_high":0.025533895553884363,"csum_min":0.01000434887218045,"csum_max":0.03608683949373386}},"strategy_comparison":[{"key":"StrategyTestV2","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.026085499493733857,"profit_total":2.6085499493733857,"profit_total_pct":260.85,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9,"max_drawdown_account":0.08674033488183289,"max_drawdown_abs":"0.00308222"}]} +{"metadata":{"StrategyTestV3":{"run_id":"asdf","backtest_start_time":"2020-10-01 18:00:00+00:00"}},"strategy":{"StrategyTestV3":{"trades":[{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.37344398340249,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 07:20:00+00:00","open_rate":9.64e-05,"close_rate":0.00010074887218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":8.676e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.676e-05,"stop_loss_ratio":0.1,"min_rate":9.64e-05,"max_rate":0.00010074887218045112,"is_open":false,"open_timestamp":1515568500000.0,"close_timestamp":1515568800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":21.026072329688816,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 
07:30:00+00:00","open_rate":4.756e-05,"close_rate":4.9705563909774425e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":4.2804e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2804e-05,"stop_loss_ratio":0.1,"min_rate":4.756e-05,"max_rate":4.9705563909774425e-05,"is_open":false,"open_timestamp":1515568500000.0,"close_timestamp":1515569400000.0,"is_short":false,"leverage":1.0,"enter_tag":"buy_tag","orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":29.94908655286014,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:35:00+00:00","open_rate":3.339e-05,"close_rate":3.489631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":3.0050999999999997e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0050999999999997e-05,"stop_loss_ratio":0.1,"min_rate":3.339e-05,"max_rate":3.489631578947368e-05,"is_open":false,"open_timestamp":1515569100000.0,"close_timestamp":1515569700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.313531353135314,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:40:00+00:00","open_rate":9.696e-05,"close_rate":0.00010133413533834584,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":8.7264e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.7264e-05,"stop_loss_ratio":0.1,"min_rate":9.696e-05,"max_rate":0.00010133413533834584,"is_open":false,"open_timestamp":1515569100000.0,"close_timestamp":1515570000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010604453870625663,"open_date":"2018-01-10 07:35:00+00:00","close_date":"2018-01-10 08:35:00+00:00","open_rate":0.0943,"close_rate":0.09477268170426063,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08487,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08487,"stop_loss_ratio":0.1,"min_rate":0.0943,"max_rate":0.09477268170426063,"is_open":false,"open_timestamp":1515569700000.0,"close_timestamp":1515573300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03677001860930642,"open_date":"2018-01-10 07:40:00+00:00","close_date":"2018-01-10 08:10:00+00:00","open_rate":0.02719607,"close_rate":0.02760503345864661,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.024476463,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024476463,"stop_loss_ratio":0.1,"min_rate":0.02719607,"max_rate":0.02760503345864661,"is_open":false,"open_timestamp":1515570000000.0,"close_timestamp":1515571800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021575196463739,"open_date":"2018-01-10 08:15:00+00:00","close_date":"2018-01-10 
09:55:00+00:00","open_rate":0.04634952,"close_rate":0.046581848421052625,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":100,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.041714568,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041714568,"stop_loss_ratio":0.1,"min_rate":0.04634952,"max_rate":0.046581848421052625,"is_open":false,"open_timestamp":1515572100000.0,"close_timestamp":1515578100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":32.615786040443574,"open_date":"2018-01-10 14:45:00+00:00","close_date":"2018-01-10 15:50:00+00:00","open_rate":3.066e-05,"close_rate":3.081368421052631e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7594e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7594e-05,"stop_loss_ratio":0.1,"min_rate":3.066e-05,"max_rate":3.081368421052631e-05,"is_open":false,"open_timestamp":1515595500000.0,"close_timestamp":1515599400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.05917194776300452,"open_date":"2018-01-10 16:35:00+00:00","close_date":"2018-01-10 17:15:00+00:00","open_rate":0.0168999,"close_rate":0.016984611278195488,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.01520991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01520991,"stop_loss_ratio":0.1,"min_rate":0.0168999,"max_rate":0.016984611278195488,"is_open":false,"open_timestamp":1515602100000.0,"close_timestamp":1515604500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010949822656672253,"open_date":"2018-01-10 16:40:00+00:00","close_date":"2018-01-10 17:20:00+00:00","open_rate":0.09132568,"close_rate":0.0917834528320802,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08219311200000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08219311200000001,"stop_loss_ratio":0.1,"min_rate":0.09132568,"max_rate":0.0917834528320802,"is_open":false,"open_timestamp":1515602400000.0,"close_timestamp":1515604800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011238476768326556,"open_date":"2018-01-10 18:50:00+00:00","close_date":"2018-01-10 19:45:00+00:00","open_rate":0.08898003,"close_rate":0.08942604518796991,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.080082027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080082027,"stop_loss_ratio":0.1,"min_rate":0.08898003,"max_rate":0.08942604518796991,"is_open":false,"open_timestamp":1515610200000.0,"close_timestamp":1515613500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011682232072680309,"open_date":"2018-01-10 22:15:00+00:00","close_date":"2018-01-10 
23:00:00+00:00","open_rate":0.08560008,"close_rate":0.08602915308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.077040072,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077040072,"stop_loss_ratio":0.1,"min_rate":0.08560008,"max_rate":0.08602915308270676,"is_open":false,"open_timestamp":1515622500000.0,"close_timestamp":1515625200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4014726015023105,"open_date":"2018-01-10 22:50:00+00:00","close_date":"2018-01-10 23:20:00+00:00","open_rate":0.00249083,"close_rate":0.0025282860902255634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002241747,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002241747,"stop_loss_ratio":0.1,"min_rate":0.00249083,"max_rate":0.0025282860902255634,"is_open":false,"open_timestamp":1515624600000.0,"close_timestamp":1515626400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":33.090668431502316,"open_date":"2018-01-10 23:15:00+00:00","close_date":"2018-01-11 00:15:00+00:00","open_rate":3.022e-05,"close_rate":3.037147869674185e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7198e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7198e-05,"stop_loss_ratio":0.1,"min_rate":3.022e-05,"max_rate":3.037147869674185e-05,"is_open":false,"open_timestamp":1515626100000.0,"close_timestamp":1515629700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.41034058268362744,"open_date":"2018-01-10 23:40:00+00:00","close_date":"2018-01-11 00:05:00+00:00","open_rate":0.002437,"close_rate":0.0024980776942355883,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.0021933,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0021933,"stop_loss_ratio":0.1,"min_rate":0.002437,"max_rate":0.0024980776942355883,"is_open":false,"open_timestamp":1515627600000.0,"close_timestamp":1515629100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02095643931654345,"open_date":"2018-01-11 00:00:00+00:00","close_date":"2018-01-11 00:35:00+00:00","open_rate":0.04771803,"close_rate":0.04843559436090225,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.042946227,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.042946227,"stop_loss_ratio":0.1,"min_rate":0.04771803,"max_rate":0.04843559436090225,"is_open":false,"open_timestamp":1515628800000.0,"close_timestamp":1515630900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":27.389756231169542,"open_date":"2018-01-11 03:40:00+00:00","close_date":"2018-01-11 
04:25:00+00:00","open_rate":3.651e-05,"close_rate":3.2859000000000005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.10448878,"profit_abs":-9.999999999999994e-05,"exit_reason":"stop_loss","initial_stop_loss_abs":3.2859000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2859000000000005e-05,"stop_loss_ratio":0.1,"min_rate":3.2859000000000005e-05,"max_rate":3.651e-05,"is_open":false,"open_timestamp":1515642000000.0,"close_timestamp":1515644700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011332594070446804,"open_date":"2018-01-11 03:55:00+00:00","close_date":"2018-01-11 04:25:00+00:00","open_rate":0.08824105,"close_rate":0.08956798308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.079416945,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079416945,"stop_loss_ratio":0.1,"min_rate":0.08824105,"max_rate":0.08956798308270676,"is_open":false,"open_timestamp":1515642900000.0,"close_timestamp":1515644700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.411522633744856,"open_date":"2018-01-11 04:00:00+00:00","close_date":"2018-01-11 04:50:00+00:00","open_rate":0.00243,"close_rate":0.002442180451127819,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002187,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002187,"stop_loss_ratio":0.1,"min_rate":0.00243,"max_rate":0.002442180451127819,"is_open":false,"open_timestamp":1515643200000.0,"close_timestamp":1515646200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022001890402423376,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 04:55:00+00:00","open_rate":0.04545064,"close_rate":0.046589753784461146,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.040905576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040905576,"stop_loss_ratio":0.1,"min_rate":0.04545064,"max_rate":0.046589753784461146,"is_open":false,"open_timestamp":1515645000000.0,"close_timestamp":1515646500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":29.655990510083036,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 04:50:00+00:00","open_rate":3.372e-05,"close_rate":3.456511278195488e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":3.0348e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0348e-05,"stop_loss_ratio":0.1,"min_rate":3.372e-05,"max_rate":3.456511278195488e-05,"is_open":false,"open_timestamp":1515645000000.0,"close_timestamp":1515646200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.037821482602118005,"open_date":"2018-01-11 04:55:00+00:00","close_date":"2018-01-11 
05:15:00+00:00","open_rate":0.02644,"close_rate":0.02710265664160401,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.023796,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023796,"stop_loss_ratio":0.1,"min_rate":0.02644,"max_rate":0.02710265664160401,"is_open":false,"open_timestamp":1515646500000.0,"close_timestamp":1515647700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011348161597821153,"open_date":"2018-01-11 11:20:00+00:00","close_date":"2018-01-11 12:00:00+00:00","open_rate":0.08812,"close_rate":0.08856170426065162,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.079308,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079308,"stop_loss_ratio":0.1,"min_rate":0.08812,"max_rate":0.08856170426065162,"is_open":false,"open_timestamp":1515669600000.0,"close_timestamp":1515672000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.037263696923919086,"open_date":"2018-01-11 11:35:00+00:00","close_date":"2018-01-11 12:15:00+00:00","open_rate":0.02683577,"close_rate":0.026970285137844607,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.024152193,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024152193,"stop_loss_ratio":0.1,"min_rate":0.02683577,"max_rate":0.026970285137844607,"is_open":false,"open_timestamp":1515670500000.0,"close_timestamp":1515672900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.329335230737954,"open_date":"2018-01-11 14:00:00+00:00","close_date":"2018-01-11 14:25:00+00:00","open_rate":4.919e-05,"close_rate":5.04228320802005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.4271e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4271e-05,"stop_loss_ratio":0.1,"min_rate":4.919e-05,"max_rate":5.04228320802005e-05,"is_open":false,"open_timestamp":1515679200000.0,"close_timestamp":1515680700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.01138317402960718,"open_date":"2018-01-11 19:25:00+00:00","close_date":"2018-01-11 20:35:00+00:00","open_rate":0.08784896,"close_rate":0.08828930566416039,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":70,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.079064064,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079064064,"stop_loss_ratio":0.1,"min_rate":0.08784896,"max_rate":0.08828930566416039,"is_open":false,"open_timestamp":1515698700000.0,"close_timestamp":1515702900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.58863858961802,"open_date":"2018-01-11 22:35:00+00:00","close_date":"2018-01-11 
23:30:00+00:00","open_rate":5.105e-05,"close_rate":5.130588972431077e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.5945e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.5945e-05,"stop_loss_ratio":0.1,"min_rate":5.105e-05,"max_rate":5.130588972431077e-05,"is_open":false,"open_timestamp":1515710100000.0,"close_timestamp":1515713400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.252525252525253,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:25:00+00:00","open_rate":3.96e-05,"close_rate":4.019548872180451e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":3.5640000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5640000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.96e-05,"max_rate":4.019548872180451e-05,"is_open":false,"open_timestamp":1515711300000.0,"close_timestamp":1515713100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":34.66204506065858,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:35:00+00:00","open_rate":2.885e-05,"close_rate":2.899461152882205e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.5965e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.5965e-05,"stop_loss_ratio":0.1,"min_rate":2.885e-05,"max_rate":2.899461152882205e-05,"is_open":false,"open_timestamp":1515711300000.0,"close_timestamp":1515713700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03780718336483932,"open_date":"2018-01-11 23:30:00+00:00","close_date":"2018-01-12 00:05:00+00:00","open_rate":0.02645,"close_rate":0.026847744360902256,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.023805000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023805000000000003,"stop_loss_ratio":0.1,"min_rate":0.02645,"max_rate":0.026847744360902256,"is_open":false,"open_timestamp":1515713400000.0,"close_timestamp":1515715500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.020833333333333332,"open_date":"2018-01-11 23:55:00+00:00","close_date":"2018-01-12 01:15:00+00:00","open_rate":0.048,"close_rate":0.04824060150375939,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0432,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0432,"stop_loss_ratio":0.1,"min_rate":0.048,"max_rate":0.04824060150375939,"is_open":false,"open_timestamp":1515714900000.0,"close_timestamp":1515719700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":21.31287297527707,"open_date":"2018-01-12 21:15:00+00:00","close_date":"2018-01-12 
21:40:00+00:00","open_rate":4.692e-05,"close_rate":4.809593984962405e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.2228e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2228e-05,"stop_loss_ratio":0.1,"min_rate":4.692e-05,"max_rate":4.809593984962405e-05,"is_open":false,"open_timestamp":1515791700000.0,"close_timestamp":1515793200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38915654211062944,"open_date":"2018-01-13 00:55:00+00:00","close_date":"2018-01-13 06:20:00+00:00","open_rate":0.00256966,"close_rate":0.0025825405012531327,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":325,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002312694,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002312694,"stop_loss_ratio":0.1,"min_rate":0.00256966,"max_rate":0.0025825405012531327,"is_open":false,"open_timestamp":1515804900000.0,"close_timestamp":1515824400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":15.96933886937081,"open_date":"2018-01-13 10:55:00+00:00","close_date":"2018-01-13 11:35:00+00:00","open_rate":6.262e-05,"close_rate":6.293388471177944e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.6358e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.6358e-05,"stop_loss_ratio":0.1,"min_rate":6.262e-05,"max_rate":6.293388471177944e-05,"is_open":false,"open_timestamp":1515840900000.0,"close_timestamp":1515843300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":21.14164904862579,"open_date":"2018-01-13 13:05:00+00:00","close_date":"2018-01-15 14:10:00+00:00","open_rate":4.73e-05,"close_rate":4.753709273182957e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":2945,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.257e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.257e-05,"stop_loss_ratio":0.1,"min_rate":4.73e-05,"max_rate":4.753709273182957e-05,"is_open":false,"open_timestamp":1515848700000.0,"close_timestamp":1516025400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.49348507339601,"open_date":"2018-01-13 13:30:00+00:00","close_date":"2018-01-13 14:45:00+00:00","open_rate":6.063e-05,"close_rate":6.0933909774436085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.4567e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4567e-05,"stop_loss_ratio":0.1,"min_rate":6.063e-05,"max_rate":6.0933909774436085e-05,"is_open":false,"open_timestamp":1515850200000.0,"close_timestamp":1515854700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":9.023641941887746,"open_date":"2018-01-13 13:40:00+00:00","close_date":"2018-01-13 
23:30:00+00:00","open_rate":0.00011082,"close_rate":0.00011137548872180448,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":590,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":9.9738e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.9738e-05,"stop_loss_ratio":0.1,"min_rate":0.00011082,"max_rate":0.00011137548872180448,"is_open":false,"open_timestamp":1515850800000.0,"close_timestamp":1515886200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.863406408094438,"open_date":"2018-01-13 15:15:00+00:00","close_date":"2018-01-13 15:55:00+00:00","open_rate":5.93e-05,"close_rate":5.9597243107769415e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.337e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.337e-05,"stop_loss_ratio":0.1,"min_rate":5.93e-05,"max_rate":5.9597243107769415e-05,"is_open":false,"open_timestamp":1515856500000.0,"close_timestamp":1515858900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.020618543947292404,"open_date":"2018-01-13 16:30:00+00:00","close_date":"2018-01-13 17:10:00+00:00","open_rate":0.04850003,"close_rate":0.04874313791979949,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.043650027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.043650027,"stop_loss_ratio":0.1,"min_rate":0.04850003,"max_rate":0.04874313791979949,"is_open":false,"open_timestamp":1515861000000.0,"close_timestamp":1515863400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010178097365511457,"open_date":"2018-01-13 22:05:00+00:00","close_date":"2018-01-14 06:25:00+00:00","open_rate":0.09825019,"close_rate":0.09874267215538848,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":500,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.088425171,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.088425171,"stop_loss_ratio":0.1,"min_rate":0.09825019,"max_rate":0.09874267215538848,"is_open":false,"open_timestamp":1515881100000.0,"close_timestamp":1515911100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.616816218012627,"open_date":"2018-01-14 00:20:00+00:00","close_date":"2018-01-14 22:55:00+00:00","open_rate":6.018e-05,"close_rate":6.048165413533834e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":1355,"profit_ratio":0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.4162e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4162e-05,"stop_loss_ratio":0.1,"min_rate":6.018e-05,"max_rate":6.048165413533834e-05,"is_open":false,"open_timestamp":1515889200000.0,"close_timestamp":1515970500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010246952581919518,"open_date":"2018-01-14 12:45:00+00:00","close_date":"2018-01-14 
13:25:00+00:00","open_rate":0.09758999,"close_rate":0.0980791628822055,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.087830991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.087830991,"stop_loss_ratio":0.1,"min_rate":0.09758999,"max_rate":0.0980791628822055,"is_open":false,"open_timestamp":1515933900000.0,"close_timestamp":1515936300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3215434083601286,"open_date":"2018-01-14 15:30:00+00:00","close_date":"2018-01-14 16:00:00+00:00","open_rate":0.00311,"close_rate":0.0031567669172932328,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002799,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002799,"stop_loss_ratio":0.1,"min_rate":0.00311,"max_rate":0.0031567669172932328,"is_open":false,"open_timestamp":1515943800000.0,"close_timestamp":1515945600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.32010140812609433,"open_date":"2018-01-14 20:45:00+00:00","close_date":"2018-01-14 22:15:00+00:00","open_rate":0.00312401,"close_rate":0.003139669197994987,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002811609,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002811609,"stop_loss_ratio":0.1,"min_rate":0.00312401,"max_rate":0.003139669197994987,"is_open":false,"open_timestamp":1515962700000.0,"close_timestamp":1515968100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.057247866085791646,"open_date":"2018-01-14 23:35:00+00:00","close_date":"2018-01-15 00:30:00+00:00","open_rate":0.0174679,"close_rate":0.017555458395989976,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.015721110000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.015721110000000003,"stop_loss_ratio":0.1,"min_rate":0.0174679,"max_rate":0.017555458395989976,"is_open":false,"open_timestamp":1515972900000.0,"close_timestamp":1515976200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.013611282991367995,"open_date":"2018-01-14 23:45:00+00:00","close_date":"2018-01-15 00:25:00+00:00","open_rate":0.07346846,"close_rate":0.07383672295739348,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.066121614,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.066121614,"stop_loss_ratio":0.1,"min_rate":0.07346846,"max_rate":0.07383672295739348,"is_open":false,"open_timestamp":1515973500000.0,"close_timestamp":1515975900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010204706410596568,"open_date":"2018-01-15 02:25:00+00:00","close_date":"2018-01-15 
03:05:00+00:00","open_rate":0.097994,"close_rate":0.09848519799498744,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0881946,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0881946,"stop_loss_ratio":0.1,"min_rate":0.097994,"max_rate":0.09848519799498744,"is_open":false,"open_timestamp":1515983100000.0,"close_timestamp":1515985500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010353038616834042,"open_date":"2018-01-15 07:20:00+00:00","close_date":"2018-01-15 08:00:00+00:00","open_rate":0.09659,"close_rate":0.09707416040100247,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.086931,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.086931,"stop_loss_ratio":0.1,"min_rate":0.09659,"max_rate":0.09707416040100247,"is_open":false,"open_timestamp":1516000800000.0,"close_timestamp":1516003200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.0130169219986,"open_date":"2018-01-15 08:20:00+00:00","close_date":"2018-01-15 08:55:00+00:00","open_rate":9.987e-05,"close_rate":0.00010137180451127818,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":8.9883e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.9883e-05,"stop_loss_ratio":0.1,"min_rate":9.987e-05,"max_rate":0.00010137180451127818,"is_open":false,"open_timestamp":1516004400000.0,"close_timestamp":1516006500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010537752023511832,"open_date":"2018-01-15 12:10:00+00:00","close_date":"2018-01-16 02:50:00+00:00","open_rate":0.0948969,"close_rate":0.09537257368421052,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":880,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08540721000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08540721000000001,"stop_loss_ratio":0.1,"min_rate":0.0948969,"max_rate":0.09537257368421052,"is_open":false,"open_timestamp":1516018200000.0,"close_timestamp":1516071000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014084507042253523,"open_date":"2018-01-15 14:10:00+00:00","close_date":"2018-01-15 17:40:00+00:00","open_rate":0.071,"close_rate":0.07135588972431077,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":210,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0639,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0639,"stop_loss_ratio":0.1,"min_rate":0.071,"max_rate":0.07135588972431077,"is_open":false,"open_timestamp":1516025400000.0,"close_timestamp":1516038000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021736763017766975,"open_date":"2018-01-15 14:30:00+00:00","close_date":"2018-01-15 
15:10:00+00:00","open_rate":0.04600501,"close_rate":0.046235611553884705,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.041404509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041404509,"stop_loss_ratio":0.1,"min_rate":0.04600501,"max_rate":0.046235611553884705,"is_open":false,"open_timestamp":1516026600000.0,"close_timestamp":1516029000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.595465140919686,"open_date":"2018-01-15 18:10:00+00:00","close_date":"2018-01-15 19:25:00+00:00","open_rate":9.438e-05,"close_rate":9.485308270676693e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":8.4942e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.4942e-05,"stop_loss_ratio":0.1,"min_rate":9.438e-05,"max_rate":9.485308270676693e-05,"is_open":false,"open_timestamp":1516039800000.0,"close_timestamp":1516044300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.032894726021471705,"open_date":"2018-01-15 18:35:00+00:00","close_date":"2018-01-15 19:15:00+00:00","open_rate":0.03040001,"close_rate":0.030552391002506264,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.027360009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027360009,"stop_loss_ratio":0.1,"min_rate":0.03040001,"max_rate":0.030552391002506264,"is_open":false,"open_timestamp":1516041300000.0,"close_timestamp":1516043700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.13208840157615,"open_date":"2018-01-15 20:25:00+00:00","close_date":"2018-01-16 08:25:00+00:00","open_rate":5.837e-05,"close_rate":5.2533e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":720,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":5.2533e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2533e-05,"stop_loss_ratio":0.1,"min_rate":5.2533e-05,"max_rate":5.837e-05,"is_open":false,"open_timestamp":1516047900000.0,"close_timestamp":1516091100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021722130506560085,"open_date":"2018-01-15 20:40:00+00:00","close_date":"2018-01-15 22:00:00+00:00","open_rate":0.046036,"close_rate":0.04626675689223057,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0414324,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0414324,"stop_loss_ratio":0.1,"min_rate":0.046036,"max_rate":0.04626675689223057,"is_open":false,"open_timestamp":1516048800000.0,"close_timestamp":1516053600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.34861425832316545,"open_date":"2018-01-16 00:30:00+00:00","close_date":"2018-01-16 
01:10:00+00:00","open_rate":0.0028685,"close_rate":0.0028828784461152877,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.00258165,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00258165,"stop_loss_ratio":0.1,"min_rate":0.0028685,"max_rate":0.0028828784461152877,"is_open":false,"open_timestamp":1516062600000.0,"close_timestamp":1516065000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014854967241083492,"open_date":"2018-01-16 01:15:00+00:00","close_date":"2018-01-16 02:35:00+00:00","open_rate":0.06731755,"close_rate":0.0676549813283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060585795000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060585795000000005,"stop_loss_ratio":0.1,"min_rate":0.06731755,"max_rate":0.0676549813283208,"is_open":false,"open_timestamp":1516065300000.0,"close_timestamp":1516070100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010848794492804754,"open_date":"2018-01-16 07:45:00+00:00","close_date":"2018-01-16 08:40:00+00:00","open_rate":0.09217614,"close_rate":0.09263817578947368,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.082958526,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082958526,"stop_loss_ratio":0.1,"min_rate":0.09217614,"max_rate":0.09263817578947368,"is_open":false,"open_timestamp":1516088700000.0,"close_timestamp":1516092000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06060606060606061,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 08:55:00+00:00","open_rate":0.0165,"close_rate":0.016913533834586467,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":0.01485,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01485,"stop_loss_ratio":0.1,"min_rate":0.0165,"max_rate":0.016913533834586467,"is_open":false,"open_timestamp":1516091700000.0,"close_timestamp":1516092900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":12.57387149503332,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 08:40:00+00:00","open_rate":7.953e-05,"close_rate":8.311781954887218e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":7.157700000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.157700000000001e-05,"stop_loss_ratio":0.1,"min_rate":7.953e-05,"max_rate":8.311781954887218e-05,"is_open":false,"open_timestamp":1516091700000.0,"close_timestamp":1516092000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022122914915269236,"open_date":"2018-01-16 08:45:00+00:00","close_date":"2018-01-16 
09:50:00+00:00","open_rate":0.045202,"close_rate":0.04542857644110275,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0406818,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0406818,"stop_loss_ratio":0.1,"min_rate":0.045202,"max_rate":0.04542857644110275,"is_open":false,"open_timestamp":1516092300000.0,"close_timestamp":1516096200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.054878048780488,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:45:00+00:00","open_rate":5.248e-05,"close_rate":5.326917293233082e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":4.7232e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7232e-05,"stop_loss_ratio":0.1,"min_rate":5.248e-05,"max_rate":5.326917293233082e-05,"is_open":false,"open_timestamp":1516094100000.0,"close_timestamp":1516095900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03457434486802627,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:55:00+00:00","open_rate":0.02892318,"close_rate":0.02906815834586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.026030862,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.026030862,"stop_loss_ratio":0.1,"min_rate":0.02892318,"max_rate":0.02906815834586466,"is_open":false,"open_timestamp":1516094100000.0,"close_timestamp":1516096500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.38735944164405,"open_date":"2018-01-16 09:50:00+00:00","close_date":"2018-01-16 10:10:00+00:00","open_rate":5.158e-05,"close_rate":5.287273182957392e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.6422e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6422e-05,"stop_loss_ratio":0.1,"min_rate":5.158e-05,"max_rate":5.287273182957392e-05,"is_open":false,"open_timestamp":1516096200000.0,"close_timestamp":1516097400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022948496230938985,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 10:40:00+00:00","open_rate":0.04357584,"close_rate":0.044231115789473675,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.039218256,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039218256,"stop_loss_ratio":0.1,"min_rate":0.04357584,"max_rate":0.044231115789473675,"is_open":false,"open_timestamp":1516097100000.0,"close_timestamp":1516099200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.035357778286929785,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 
10:35:00+00:00","open_rate":0.02828232,"close_rate":0.02870761804511278,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.025454088,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025454088,"stop_loss_ratio":0.1,"min_rate":0.02828232,"max_rate":0.02870761804511278,"is_open":false,"open_timestamp":1516097100000.0,"close_timestamp":1516098900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.64975755315181,"open_date":"2018-01-16 13:45:00+00:00","close_date":"2018-01-16 14:20:00+00:00","open_rate":5.362e-05,"close_rate":5.442631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.8258e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8258e-05,"stop_loss_ratio":0.1,"min_rate":5.362e-05,"max_rate":5.442631578947368e-05,"is_open":false,"open_timestamp":1516110300000.0,"close_timestamp":1516112400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.86080724254998,"open_date":"2018-01-16 17:30:00+00:00","close_date":"2018-01-16 18:25:00+00:00","open_rate":5.302e-05,"close_rate":5.328576441102756e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7718e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7718e-05,"stop_loss_ratio":0.1,"min_rate":5.302e-05,"max_rate":5.328576441102756e-05,"is_open":false,"open_timestamp":1516123800000.0,"close_timestamp":1516127100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010952903718828448,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 18:45:00+00:00","open_rate":0.09129999,"close_rate":0.09267292218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.082169991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082169991,"stop_loss_ratio":0.1,"min_rate":0.09129999,"max_rate":0.09267292218045112,"is_open":false,"open_timestamp":1516126500000.0,"close_timestamp":1516128300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":26.26050420168067,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 18:35:00+00:00","open_rate":3.808e-05,"close_rate":3.903438596491228e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":3.4272e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.4272e-05,"stop_loss_ratio":0.1,"min_rate":3.808e-05,"max_rate":3.903438596491228e-05,"is_open":false,"open_timestamp":1516126500000.0,"close_timestamp":1516127700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.035574376772493324,"open_date":"2018-01-16 19:00:00+00:00","close_date":"2018-01-16 
19:30:00+00:00","open_rate":0.02811012,"close_rate":0.028532828571428567,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.025299108,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025299108,"stop_loss_ratio":0.1,"min_rate":0.02811012,"max_rate":0.028532828571428567,"is_open":false,"open_timestamp":1516129200000.0,"close_timestamp":1516131000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.387028357567759,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":0.00258379,"close_rate":0.002325411,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":0.002325411,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002325411,"stop_loss_ratio":0.1,"min_rate":0.002325411,"max_rate":0.00258379,"is_open":false,"open_timestamp":1516137900000.0,"close_timestamp":1516141500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":39.07776475185619,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":2.559e-05,"close_rate":2.3031e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":2.3031e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.3031e-05,"stop_loss_ratio":0.1,"min_rate":2.3031e-05,"max_rate":2.559e-05,"is_open":false,"open_timestamp":1516137900000.0,"close_timestamp":1516142700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":13.123359580052494,"open_date":"2018-01-16 21:35:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":7.62e-05,"close_rate":6.858e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":6.858e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.858e-05,"stop_loss_ratio":0.1,"min_rate":6.858e-05,"max_rate":7.62e-05,"is_open":false,"open_timestamp":1516138500000.0,"close_timestamp":1516141500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06622516556291391,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 22:40:00+00:00","open_rate":0.0151,"close_rate":0.015781203007518795,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":0.01359,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01359,"stop_loss_ratio":0.1,"min_rate":0.0151,"max_rate":0.015781203007518795,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4350777048780912,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 
22:35:00+00:00","open_rate":0.00229844,"close_rate":0.002402129022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488727e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002068596,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002068596,"stop_loss_ratio":0.1,"min_rate":0.00229844,"max_rate":0.002402129022556391,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4243113426908128,"open_date":"2018-01-16 22:40:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":0.00235676,"close_rate":0.00246308,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488727e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002121084,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002121084,"stop_loss_ratio":0.1,"min_rate":0.00235676,"max_rate":0.00246308,"is_open":false,"open_timestamp":1516142400000.0,"close_timestamp":1516142700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01585559988076589,"open_date":"2018-01-16 22:45:00+00:00","close_date":"2018-01-16 23:05:00+00:00","open_rate":0.0630692,"close_rate":0.06464988170426066,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":0.056762280000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.056762280000000005,"stop_loss_ratio":0.1,"min_rate":0.0630692,"max_rate":0.06464988170426066,"is_open":false,"open_timestamp":1516142700000.0,"close_timestamp":1516143900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":45.45454545454545,"open_date":"2018-01-16 22:50:00+00:00","close_date":"2018-01-16 22:55:00+00:00","open_rate":2.2e-05,"close_rate":2.299248120300751e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488684e-05,"exit_reason":"roi","initial_stop_loss_abs":1.98e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":1.98e-05,"stop_loss_ratio":0.1,"min_rate":2.2e-05,"max_rate":2.299248120300751e-05,"is_open":false,"open_timestamp":1516143000000.0,"close_timestamp":1516143300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.10454362685967,"open_date":"2018-01-17 03:30:00+00:00","close_date":"2018-01-17 04:00:00+00:00","open_rate":4.974e-05,"close_rate":5.048796992481203e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.4766000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4766000000000005e-05,"stop_loss_ratio":0.1,"min_rate":4.974e-05,"max_rate":5.048796992481203e-05,"is_open":false,"open_timestamp":1516159800000.0,"close_timestamp":1516161600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":14.068655036578503,"open_date":"2018-01-17 03:55:00+00:00","close_date":"2018-01-17 
04:15:00+00:00","open_rate":7.108e-05,"close_rate":7.28614536340852e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":6.3972e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.3972e-05,"stop_loss_ratio":0.1,"min_rate":7.108e-05,"max_rate":7.28614536340852e-05,"is_open":false,"open_timestamp":1516161300000.0,"close_timestamp":1516162500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.0231107002542177,"open_date":"2018-01-17 09:35:00+00:00","close_date":"2018-01-17 10:15:00+00:00","open_rate":0.04327,"close_rate":0.04348689223057644,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.038943000000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.038943000000000005,"stop_loss_ratio":0.1,"min_rate":0.04327,"max_rate":0.04348689223057644,"is_open":false,"open_timestamp":1516181700000.0,"close_timestamp":1516184100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.012007204322593,"open_date":"2018-01-17 10:20:00+00:00","close_date":"2018-01-17 17:00:00+00:00","open_rate":4.997e-05,"close_rate":5.022047619047618e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":400,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.4973e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4973e-05,"stop_loss_ratio":0.1,"min_rate":4.997e-05,"max_rate":5.022047619047618e-05,"is_open":false,"open_timestamp":1516184400000.0,"close_timestamp":1516208400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014626687444363738,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 11:25:00+00:00","open_rate":0.06836818,"close_rate":0.06871087764411027,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.061531362,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061531362,"stop_loss_ratio":0.1,"min_rate":0.06836818,"max_rate":0.06871087764411027,"is_open":false,"open_timestamp":1516185000000.0,"close_timestamp":1516188300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":27.548209366391184,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 11:10:00+00:00","open_rate":3.63e-05,"close_rate":3.648195488721804e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.2670000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2670000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.63e-05,"max_rate":3.648195488721804e-05,"is_open":false,"open_timestamp":1516185000000.0,"close_timestamp":1516187400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03558718861209965,"open_date":"2018-01-17 12:30:00+00:00","close_date":"2018-01-17 
22:05:00+00:00","open_rate":0.0281,"close_rate":0.02824085213032581,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":575,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.02529,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.02529,"stop_loss_ratio":0.1,"min_rate":0.0281,"max_rate":0.02824085213032581,"is_open":false,"open_timestamp":1516192200000.0,"close_timestamp":1516226700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011559355963546878,"open_date":"2018-01-17 12:35:00+00:00","close_date":"2018-01-17 16:55:00+00:00","open_rate":0.08651001,"close_rate":0.08694364413533832,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.077859009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077859009,"stop_loss_ratio":0.1,"min_rate":0.08651001,"max_rate":0.08694364413533832,"is_open":false,"open_timestamp":1516192500000.0,"close_timestamp":1516208100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.752529735487308,"open_date":"2018-01-18 05:00:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":5.633e-05,"close_rate":5.6612355889724306e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0697e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0697e-05,"stop_loss_ratio":0.1,"min_rate":5.633e-05,"max_rate":5.6612355889724306e-05,"is_open":false,"open_timestamp":1516251600000.0,"close_timestamp":1516254900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01430923457900944,"open_date":"2018-01-18 05:20:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":0.06988494,"close_rate":0.07093584135338346,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.06289644600000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06289644600000001,"stop_loss_ratio":0.1,"min_rate":0.06988494,"max_rate":0.07093584135338346,"is_open":false,"open_timestamp":1516252800000.0,"close_timestamp":1516254900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.034265103697024,"open_date":"2018-01-18 07:35:00+00:00","close_date":"2018-01-18 08:15:00+00:00","open_rate":5.545e-05,"close_rate":5.572794486215538e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.9905e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.9905e-05,"stop_loss_ratio":0.1,"min_rate":5.545e-05,"max_rate":5.572794486215538e-05,"is_open":false,"open_timestamp":1516260900000.0,"close_timestamp":1516263300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06121723118136401,"open_date":"2018-01-18 09:00:00+00:00","close_date":"2018-01-18 
09:40:00+00:00","open_rate":0.01633527,"close_rate":0.016417151052631574,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014701743,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014701743,"stop_loss_ratio":0.1,"min_rate":0.01633527,"max_rate":0.016417151052631574,"is_open":false,"open_timestamp":1516266000000.0,"close_timestamp":1516268400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3707356136045141,"open_date":"2018-01-18 16:40:00+00:00","close_date":"2018-01-18 17:20:00+00:00","open_rate":0.00269734,"close_rate":0.002710860501253133,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002427606,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002427606,"stop_loss_ratio":0.1,"min_rate":0.00269734,"max_rate":0.002710860501253133,"is_open":false,"open_timestamp":1516293600000.0,"close_timestamp":1516296000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":22.3463687150838,"open_date":"2018-01-18 18:05:00+00:00","close_date":"2018-01-18 18:30:00+00:00","open_rate":4.475e-05,"close_rate":4.587155388471177e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":4.0275e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.0275e-05,"stop_loss_ratio":0.1,"min_rate":4.475e-05,"max_rate":4.587155388471177e-05,"is_open":false,"open_timestamp":1516298700000.0,"close_timestamp":1516300200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":35.842293906810035,"open_date":"2018-01-18 18:25:00+00:00","close_date":"2018-01-18 18:55:00+00:00","open_rate":2.79e-05,"close_rate":2.8319548872180444e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":2.511e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.511e-05,"stop_loss_ratio":0.1,"min_rate":2.79e-05,"max_rate":2.8319548872180444e-05,"is_open":false,"open_timestamp":1516299900000.0,"close_timestamp":1516301700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022525942001105578,"open_date":"2018-01-18 20:10:00+00:00","close_date":"2018-01-18 20:50:00+00:00","open_rate":0.04439326,"close_rate":0.04461578260651629,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.039953934,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039953934,"stop_loss_ratio":0.1,"min_rate":0.04439326,"max_rate":0.04461578260651629,"is_open":false,"open_timestamp":1516306200000.0,"close_timestamp":1516308600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":22.271714922048996,"open_date":"2018-01-18 21:30:00+00:00","close_date":"2018-01-19 
00:35:00+00:00","open_rate":4.49e-05,"close_rate":4.51250626566416e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":185,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.041e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.041e-05,"stop_loss_ratio":0.1,"min_rate":4.49e-05,"max_rate":4.51250626566416e-05,"is_open":false,"open_timestamp":1516311000000.0,"close_timestamp":1516322100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03502626970227671,"open_date":"2018-01-18 21:55:00+00:00","close_date":"2018-01-19 05:05:00+00:00","open_rate":0.02855,"close_rate":0.028693107769423555,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":430,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025695,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025695,"stop_loss_ratio":0.1,"min_rate":0.02855,"max_rate":0.028693107769423555,"is_open":false,"open_timestamp":1516312500000.0,"close_timestamp":1516338300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.25327812284334,"open_date":"2018-01-18 22:10:00+00:00","close_date":"2018-01-18 22:50:00+00:00","open_rate":5.796e-05,"close_rate":5.8250526315789473e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.2164e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2164e-05,"stop_loss_ratio":0.1,"min_rate":5.796e-05,"max_rate":5.8250526315789473e-05,"is_open":false,"open_timestamp":1516313400000.0,"close_timestamp":1516315800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02303975994413319,"open_date":"2018-01-18 23:50:00+00:00","close_date":"2018-01-19 00:30:00+00:00","open_rate":0.04340323,"close_rate":0.04362079005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.039062907,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039062907,"stop_loss_ratio":0.1,"min_rate":0.04340323,"max_rate":0.04362079005012531,"is_open":false,"open_timestamp":1516319400000.0,"close_timestamp":1516321800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02244943545282195,"open_date":"2018-01-19 16:45:00+00:00","close_date":"2018-01-19 17:35:00+00:00","open_rate":0.04454455,"close_rate":0.04476783095238095,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.040090095000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040090095000000006,"stop_loss_ratio":0.1,"min_rate":0.04454455,"max_rate":0.04476783095238095,"is_open":false,"open_timestamp":1516380300000.0,"close_timestamp":1516383300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.793594306049823,"open_date":"2018-01-19 17:15:00+00:00","close_date":"2018-01-19 
19:55:00+00:00","open_rate":5.62e-05,"close_rate":5.648170426065162e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":160,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.058e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.058e-05,"stop_loss_ratio":0.1,"min_rate":5.62e-05,"max_rate":5.648170426065162e-05,"is_open":false,"open_timestamp":1516382100000.0,"close_timestamp":1516391700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.04678497349619,"open_date":"2018-01-19 17:20:00+00:00","close_date":"2018-01-19 20:15:00+00:00","open_rate":4.339e-05,"close_rate":4.360749373433584e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.9051e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.9051e-05,"stop_loss_ratio":0.1,"min_rate":4.339e-05,"max_rate":4.360749373433584e-05,"is_open":false,"open_timestamp":1516382400000.0,"close_timestamp":1516392900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":9.910802775024775,"open_date":"2018-01-20 04:45:00+00:00","close_date":"2018-01-20 17:35:00+00:00","open_rate":0.0001009,"close_rate":0.00010140576441102755,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":770,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":9.081e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.081e-05,"stop_loss_ratio":0.1,"min_rate":0.0001009,"max_rate":0.00010140576441102755,"is_open":false,"open_timestamp":1516423500000.0,"close_timestamp":1516469700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3696789338459548,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 15:15:00+00:00","open_rate":0.00270505,"close_rate":0.002718609147869674,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":625,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002434545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002434545,"stop_loss_ratio":0.1,"min_rate":0.00270505,"max_rate":0.002718609147869674,"is_open":false,"open_timestamp":1516423800000.0,"close_timestamp":1516461300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.033333311111125925,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 07:00:00+00:00","open_rate":0.03000002,"close_rate":0.030150396040100245,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.027000018,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027000018,"stop_loss_ratio":0.1,"min_rate":0.03000002,"max_rate":0.030150396040100245,"is_open":false,"open_timestamp":1516423800000.0,"close_timestamp":1516431600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.315018315018317,"open_date":"2018-01-20 09:00:00+00:00","close_date":"2018-01-20 
09:40:00+00:00","open_rate":5.46e-05,"close_rate":5.4873684210526304e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.914e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.914e-05,"stop_loss_ratio":0.1,"min_rate":5.46e-05,"max_rate":5.4873684210526304e-05,"is_open":false,"open_timestamp":1516438800000.0,"close_timestamp":1516441200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03244412634781012,"open_date":"2018-01-20 18:25:00+00:00","close_date":"2018-01-25 03:50:00+00:00","open_rate":0.03082222,"close_rate":0.027739998,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":6325,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000015,"exit_reason":"stop_loss","initial_stop_loss_abs":0.027739998,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027739998,"stop_loss_ratio":0.1,"min_rate":0.027739998,"max_rate":0.03082222,"is_open":false,"open_timestamp":1516472700000.0,"close_timestamp":1516852200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011148273260677063,"open_date":"2018-01-20 22:25:00+00:00","close_date":"2018-01-20 23:15:00+00:00","open_rate":0.08969999,"close_rate":0.09014961401002504,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.080729991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080729991,"stop_loss_ratio":0.1,"min_rate":0.08969999,"max_rate":0.09014961401002504,"is_open":false,"open_timestamp":1516487100000.0,"close_timestamp":1516490100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06125570520324337,"open_date":"2018-01-21 02:50:00+00:00","close_date":"2018-01-21 14:30:00+00:00","open_rate":0.01632501,"close_rate":0.01640683962406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":700,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014692509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014692509,"stop_loss_ratio":0.1,"min_rate":0.01632501,"max_rate":0.01640683962406015,"is_open":false,"open_timestamp":1516503000000.0,"close_timestamp":1516545000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01417675579120474,"open_date":"2018-01-21 10:20:00+00:00","close_date":"2018-01-21 11:00:00+00:00","open_rate":0.070538,"close_rate":0.07089157393483708,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0634842,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0634842,"stop_loss_ratio":0.1,"min_rate":0.070538,"max_rate":0.07089157393483708,"is_open":false,"open_timestamp":1516530000000.0,"close_timestamp":1516532400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.864365214110546,"open_date":"2018-01-21 15:50:00+00:00","close_date":"2018-01-21 
18:45:00+00:00","open_rate":5.301e-05,"close_rate":5.327571428571427e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7709e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7709e-05,"stop_loss_ratio":0.1,"min_rate":5.301e-05,"max_rate":5.327571428571427e-05,"is_open":false,"open_timestamp":1516549800000.0,"close_timestamp":1516560300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.284450063211125,"open_date":"2018-01-21 16:20:00+00:00","close_date":"2018-01-21 17:00:00+00:00","open_rate":3.955e-05,"close_rate":3.9748245614035085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.5595e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5595e-05,"stop_loss_ratio":0.1,"min_rate":3.955e-05,"max_rate":3.9748245614035085e-05,"is_open":false,"open_timestamp":1516551600000.0,"close_timestamp":1516554000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38683971296493297,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:45:00+00:00","open_rate":0.00258505,"close_rate":0.002623922932330827,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002326545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326545,"stop_loss_ratio":0.1,"min_rate":0.00258505,"max_rate":0.002623922932330827,"is_open":false,"open_timestamp":1516569300000.0,"close_timestamp":1516571100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.621316935690498,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:55:00+00:00","open_rate":3.903e-05,"close_rate":3.922563909774435e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":3.5127e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5127e-05,"stop_loss_ratio":0.1,"min_rate":3.903e-05,"max_rate":3.922563909774435e-05,"is_open":false,"open_timestamp":1516569300000.0,"close_timestamp":1516571700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.098548510313215,"open_date":"2018-01-22 00:35:00+00:00","close_date":"2018-01-22 10:35:00+00:00","open_rate":5.236e-05,"close_rate":5.262245614035087e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":600,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7124e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7124e-05,"stop_loss_ratio":0.1,"min_rate":5.236e-05,"max_rate":5.262245614035087e-05,"is_open":false,"open_timestamp":1516581300000.0,"close_timestamp":1516617300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.076650420912715,"open_date":"2018-01-22 01:30:00+00:00","close_date":"2018-01-22 
02:10:00+00:00","open_rate":9.028e-05,"close_rate":9.07325313283208e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":8.1252e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.1252e-05,"stop_loss_ratio":0.1,"min_rate":9.028e-05,"max_rate":9.07325313283208e-05,"is_open":false,"open_timestamp":1516584600000.0,"close_timestamp":1516587000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3721622627465575,"open_date":"2018-01-22 12:25:00+00:00","close_date":"2018-01-22 14:35:00+00:00","open_rate":0.002687,"close_rate":0.002700468671679198,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0024183000000000004,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0024183000000000004,"stop_loss_ratio":0.1,"min_rate":0.002687,"max_rate":0.002700468671679198,"is_open":false,"open_timestamp":1516623900000.0,"close_timestamp":1516631700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.99232245681382,"open_date":"2018-01-22 13:15:00+00:00","close_date":"2018-01-22 13:55:00+00:00","open_rate":4.168e-05,"close_rate":4.188892230576441e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.7512e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.7512e-05,"stop_loss_ratio":0.1,"min_rate":4.168e-05,"max_rate":4.188892230576441e-05,"is_open":false,"open_timestamp":1516626900000.0,"close_timestamp":1516629300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.336583153837434,"open_date":"2018-01-22 14:00:00+00:00","close_date":"2018-01-22 14:30:00+00:00","open_rate":8.821e-05,"close_rate":8.953646616541353e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":7.9389e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.9389e-05,"stop_loss_ratio":0.1,"min_rate":8.821e-05,"max_rate":8.953646616541353e-05,"is_open":false,"open_timestamp":1516629600000.0,"close_timestamp":1516631400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.334880123743233,"open_date":"2018-01-22 15:55:00+00:00","close_date":"2018-01-22 16:40:00+00:00","open_rate":5.172e-05,"close_rate":5.1979248120300745e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6548e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6548e-05,"stop_loss_ratio":0.1,"min_rate":5.172e-05,"max_rate":5.1979248120300745e-05,"is_open":false,"open_timestamp":1516636500000.0,"close_timestamp":1516639200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":33.04692663582287,"open_date":"2018-01-22 16:05:00+00:00","close_date":"2018-01-22 
16:25:00+00:00","open_rate":3.026e-05,"close_rate":3.101839598997494e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":2.7234e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7234e-05,"stop_loss_ratio":0.1,"min_rate":3.026e-05,"max_rate":3.101839598997494e-05,"is_open":false,"open_timestamp":1516637100000.0,"close_timestamp":1516638300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014156285390713478,"open_date":"2018-01-22 19:50:00+00:00","close_date":"2018-01-23 00:10:00+00:00","open_rate":0.07064,"close_rate":0.07099408521303258,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.063576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.063576,"stop_loss_ratio":0.1,"min_rate":0.07064,"max_rate":0.07099408521303258,"is_open":false,"open_timestamp":1516650600000.0,"close_timestamp":1516666200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06080938507725528,"open_date":"2018-01-22 21:25:00+00:00","close_date":"2018-01-22 22:05:00+00:00","open_rate":0.01644483,"close_rate":0.01652726022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014800347,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014800347,"stop_loss_ratio":0.1,"min_rate":0.01644483,"max_rate":0.01652726022556391,"is_open":false,"open_timestamp":1516656300000.0,"close_timestamp":1516658700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.08935580697299,"open_date":"2018-01-23 00:05:00+00:00","close_date":"2018-01-23 00:35:00+00:00","open_rate":4.331e-05,"close_rate":4.3961278195488714e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":3.8979e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.8979e-05,"stop_loss_ratio":0.1,"min_rate":4.331e-05,"max_rate":4.3961278195488714e-05,"is_open":false,"open_timestamp":1516665900000.0,"close_timestamp":1516667700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.250000000000004,"open_date":"2018-01-23 01:50:00+00:00","close_date":"2018-01-23 02:15:00+00:00","open_rate":3.2e-05,"close_rate":3.2802005012531326e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":2.88e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.88e-05,"stop_loss_ratio":0.1,"min_rate":3.2e-05,"max_rate":3.2802005012531326e-05,"is_open":false,"open_timestamp":1516672200000.0,"close_timestamp":1516673700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010907854156754156,"open_date":"2018-01-23 04:25:00+00:00","close_date":"2018-01-23 
05:15:00+00:00","open_rate":0.09167706,"close_rate":0.09213659413533835,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08250935400000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08250935400000001,"stop_loss_ratio":0.1,"min_rate":0.09167706,"max_rate":0.09213659413533835,"is_open":false,"open_timestamp":1516681500000.0,"close_timestamp":1516684500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014440474918339117,"open_date":"2018-01-23 07:35:00+00:00","close_date":"2018-01-23 09:00:00+00:00","open_rate":0.0692498,"close_rate":0.06959691679197995,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06232482,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06232482,"stop_loss_ratio":0.1,"min_rate":0.0692498,"max_rate":0.06959691679197995,"is_open":false,"open_timestamp":1516692900000.0,"close_timestamp":1516698000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.426775612822127,"open_date":"2018-01-23 10:50:00+00:00","close_date":"2018-01-23 13:05:00+00:00","open_rate":3.182e-05,"close_rate":3.197949874686716e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":135,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.8638e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8638e-05,"stop_loss_ratio":0.1,"min_rate":3.182e-05,"max_rate":3.197949874686716e-05,"is_open":false,"open_timestamp":1516704600000.0,"close_timestamp":1516712700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024461839530332683,"open_date":"2018-01-23 11:05:00+00:00","close_date":"2018-01-23 16:05:00+00:00","open_rate":0.04088,"close_rate":0.04108491228070175,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":300,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036792,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036792,"stop_loss_ratio":0.1,"min_rate":0.04088,"max_rate":0.04108491228070175,"is_open":false,"open_timestamp":1516705500000.0,"close_timestamp":1516723500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.417475728155345,"open_date":"2018-01-23 14:55:00+00:00","close_date":"2018-01-23 15:35:00+00:00","open_rate":5.15e-05,"close_rate":5.175814536340851e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.635e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.635e-05,"stop_loss_ratio":0.1,"min_rate":5.15e-05,"max_rate":5.175814536340851e-05,"is_open":false,"open_timestamp":1516719300000.0,"close_timestamp":1516721700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011023294646713328,"open_date":"2018-01-23 16:35:00+00:00","close_date":"2018-01-24 
00:05:00+00:00","open_rate":0.09071698,"close_rate":0.09117170170426064,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":450,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.081645282,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.081645282,"stop_loss_ratio":0.1,"min_rate":0.09071698,"max_rate":0.09117170170426064,"is_open":false,"open_timestamp":1516725300000.0,"close_timestamp":1516752300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.969309462915604,"open_date":"2018-01-23 17:25:00+00:00","close_date":"2018-01-23 18:45:00+00:00","open_rate":3.128e-05,"close_rate":3.1436791979949865e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.8152e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8152e-05,"stop_loss_ratio":0.1,"min_rate":3.128e-05,"max_rate":3.1436791979949865e-05,"is_open":false,"open_timestamp":1516728300000.0,"close_timestamp":1516733100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.465724751439035,"open_date":"2018-01-23 20:15:00+00:00","close_date":"2018-01-23 22:00:00+00:00","open_rate":9.555e-05,"close_rate":9.602894736842104e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":8.5995e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.5995e-05,"stop_loss_ratio":0.1,"min_rate":9.555e-05,"max_rate":9.602894736842104e-05,"is_open":false,"open_timestamp":1516738500000.0,"close_timestamp":1516744800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02450979791426522,"open_date":"2018-01-23 22:30:00+00:00","close_date":"2018-01-23 23:10:00+00:00","open_rate":0.04080001,"close_rate":0.0410045213283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036720009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036720009,"stop_loss_ratio":0.1,"min_rate":0.04080001,"max_rate":0.0410045213283208,"is_open":false,"open_timestamp":1516746600000.0,"close_timestamp":1516749000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.36858415649816,"open_date":"2018-01-23 23:50:00+00:00","close_date":"2018-01-24 03:35:00+00:00","open_rate":5.163e-05,"close_rate":5.18887969924812e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":225,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6467e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6467e-05,"stop_loss_ratio":0.1,"min_rate":5.163e-05,"max_rate":5.18887969924812e-05,"is_open":false,"open_timestamp":1516751400000.0,"close_timestamp":1516764900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024747691102289384,"open_date":"2018-01-24 00:20:00+00:00","close_date":"2018-01-24 
01:50:00+00:00","open_rate":0.04040781,"close_rate":0.04061035541353383,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036367029,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036367029,"stop_loss_ratio":0.1,"min_rate":0.04040781,"max_rate":0.04061035541353383,"is_open":false,"open_timestamp":1516753200000.0,"close_timestamp":1516758600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.485580670303975,"open_date":"2018-01-24 06:45:00+00:00","close_date":"2018-01-24 07:25:00+00:00","open_rate":5.132e-05,"close_rate":5.157724310776942e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6188000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6188000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.132e-05,"max_rate":5.157724310776942e-05,"is_open":false,"open_timestamp":1516776300000.0,"close_timestamp":1516778700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.23816852635629,"open_date":"2018-01-24 14:15:00+00:00","close_date":"2018-01-24 14:25:00+00:00","open_rate":5.198e-05,"close_rate":5.432496240601503e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":4.6782e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6782e-05,"stop_loss_ratio":0.1,"min_rate":5.198e-05,"max_rate":5.432496240601503e-05,"is_open":false,"open_timestamp":1516803300000.0,"close_timestamp":1516803900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":32.74394237066143,"open_date":"2018-01-24 14:50:00+00:00","close_date":"2018-01-24 16:35:00+00:00","open_rate":3.054e-05,"close_rate":3.069308270676692e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7486000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7486000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.054e-05,"max_rate":3.069308270676692e-05,"is_open":false,"open_timestamp":1516805400000.0,"close_timestamp":1516811700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.795638562020944,"open_date":"2018-01-24 15:10:00+00:00","close_date":"2018-01-24 16:15:00+00:00","open_rate":9.263e-05,"close_rate":9.309431077694236e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":8.3367e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.3367e-05,"stop_loss_ratio":0.1,"min_rate":9.263e-05,"max_rate":9.309431077694236e-05,"is_open":false,"open_timestamp":1516806600000.0,"close_timestamp":1516810500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.13565469713457,"open_date":"2018-01-24 22:40:00+00:00","close_date":"2018-01-24 
23:25:00+00:00","open_rate":5.514e-05,"close_rate":5.54163909774436e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.962599999999999e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.962599999999999e-05,"stop_loss_ratio":0.1,"min_rate":5.514e-05,"max_rate":5.54163909774436e-05,"is_open":false,"open_timestamp":1516833600000.0,"close_timestamp":1516836300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":20.3210729526519,"open_date":"2018-01-25 00:50:00+00:00","close_date":"2018-01-25 01:30:00+00:00","open_rate":4.921e-05,"close_rate":4.9456666666666664e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.4289e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4289e-05,"stop_loss_ratio":0.1,"min_rate":4.921e-05,"max_rate":4.9456666666666664e-05,"is_open":false,"open_timestamp":1516841400000.0,"close_timestamp":1516843800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38461538461538464,"open_date":"2018-01-25 08:15:00+00:00","close_date":"2018-01-25 12:15:00+00:00","open_rate":0.0026,"close_rate":0.002613032581453634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":240,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.00234,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00234,"stop_loss_ratio":0.1,"min_rate":0.0026,"max_rate":0.002613032581453634,"is_open":false,"open_timestamp":1516868100000.0,"close_timestamp":1516882500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03571593119825878,"open_date":"2018-01-25 10:25:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":0.02799871,"close_rate":0.028139054411027563,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":350,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025198839,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025198839,"stop_loss_ratio":0.1,"min_rate":0.02799871,"max_rate":0.028139054411027563,"is_open":false,"open_timestamp":1516875900000.0,"close_timestamp":1516896900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024516401717913305,"open_date":"2018-01-25 11:00:00+00:00","close_date":"2018-01-25 11:45:00+00:00","open_rate":0.04078902,"close_rate":0.0409934762406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036710118,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036710118,"stop_loss_ratio":0.1,"min_rate":0.04078902,"max_rate":0.0409934762406015,"is_open":false,"open_timestamp":1516878000000.0,"close_timestamp":1516880700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":34.602076124567475,"open_date":"2018-01-25 13:05:00+00:00","close_date":"2018-01-25 
13:45:00+00:00","open_rate":2.89e-05,"close_rate":2.904486215538847e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.601e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.601e-05,"stop_loss_ratio":0.1,"min_rate":2.89e-05,"max_rate":2.904486215538847e-05,"is_open":false,"open_timestamp":1516885500000.0,"close_timestamp":1516887900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02432912439481303,"open_date":"2018-01-25 13:20:00+00:00","close_date":"2018-01-25 14:05:00+00:00","open_rate":0.041103,"close_rate":0.04130903007518797,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0369927,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0369927,"stop_loss_ratio":0.1,"min_rate":0.041103,"max_rate":0.04130903007518797,"is_open":false,"open_timestamp":1516886400000.0,"close_timestamp":1516889100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.42299189388357,"open_date":"2018-01-25 15:45:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":5.428e-05,"close_rate":5.509624060150376e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.8852000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8852000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.428e-05,"max_rate":5.509624060150376e-05,"is_open":false,"open_timestamp":1516895100000.0,"close_timestamp":1516896900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.47063169560399,"open_date":"2018-01-25 17:45:00+00:00","close_date":"2018-01-25 23:15:00+00:00","open_rate":5.414e-05,"close_rate":5.441137844611528e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":330,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.8726e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8726e-05,"stop_loss_ratio":0.1,"min_rate":5.414e-05,"max_rate":5.441137844611528e-05,"is_open":false,"open_timestamp":1516902300000.0,"close_timestamp":1516922100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02415005686130888,"open_date":"2018-01-25 21:15:00+00:00","close_date":"2018-01-25 21:55:00+00:00","open_rate":0.04140777,"close_rate":0.0416153277443609,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.037266993000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.037266993000000005,"stop_loss_ratio":0.1,"min_rate":0.04140777,"max_rate":0.0416153277443609,"is_open":false,"open_timestamp":1516914900000.0,"close_timestamp":1516917300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3932224183965176,"open_date":"2018-01-26 02:05:00+00:00","close_date":"2018-01-26 
02:45:00+00:00","open_rate":0.00254309,"close_rate":0.002555837318295739,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002288781,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002288781,"stop_loss_ratio":0.1,"min_rate":0.00254309,"max_rate":0.002555837318295739,"is_open":false,"open_timestamp":1516932300000.0,"close_timestamp":1516934700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.834849295523455,"open_date":"2018-01-26 02:55:00+00:00","close_date":"2018-01-26 15:10:00+00:00","open_rate":5.607e-05,"close_rate":5.6351052631578935e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":735,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0463e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0463e-05,"stop_loss_ratio":0.1,"min_rate":5.607e-05,"max_rate":5.6351052631578935e-05,"is_open":false,"open_timestamp":1516935300000.0,"close_timestamp":1516979400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.39400171784748983,"open_date":"2018-01-26 06:10:00+00:00","close_date":"2018-01-26 09:25:00+00:00","open_rate":0.00253806,"close_rate":0.0025507821052631577,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":195,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002284254,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002284254,"stop_loss_ratio":0.1,"min_rate":0.00253806,"max_rate":0.0025507821052631577,"is_open":false,"open_timestamp":1516947000000.0,"close_timestamp":1516958700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024096385542168672,"open_date":"2018-01-26 07:25:00+00:00","close_date":"2018-01-26 09:55:00+00:00","open_rate":0.0415,"close_rate":0.04170802005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":150,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.03735,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03735,"stop_loss_ratio":0.1,"min_rate":0.0415,"max_rate":0.04170802005012531,"is_open":false,"open_timestamp":1516951500000.0,"close_timestamp":1516960500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.793459875963165,"open_date":"2018-01-26 09:55:00+00:00","close_date":"2018-01-26 10:25:00+00:00","open_rate":5.321e-05,"close_rate":5.401015037593984e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":4.7889e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7889e-05,"stop_loss_ratio":0.1,"min_rate":5.321e-05,"max_rate":5.401015037593984e-05,"is_open":false,"open_timestamp":1516960500000.0,"close_timestamp":1516962300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.036074437437185386,"open_date":"2018-01-26 16:05:00+00:00","close_date":"2018-01-26 
16:45:00+00:00","open_rate":0.02772046,"close_rate":0.02785940967418546,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.024948414,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024948414,"stop_loss_ratio":0.1,"min_rate":0.02772046,"max_rate":0.02785940967418546,"is_open":false,"open_timestamp":1516982700000.0,"close_timestamp":1516985100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010569326272036914,"open_date":"2018-01-26 23:35:00+00:00","close_date":"2018-01-27 00:15:00+00:00","open_rate":0.09461341,"close_rate":0.09508766268170424,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.085152069,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085152069,"stop_loss_ratio":0.1,"min_rate":0.09461341,"max_rate":0.09508766268170424,"is_open":false,"open_timestamp":1517009700000.0,"close_timestamp":1517012100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":17.809439002671414,"open_date":"2018-01-27 00:35:00+00:00","close_date":"2018-01-27 01:30:00+00:00","open_rate":5.615e-05,"close_rate":5.643145363408521e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0535e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0535e-05,"stop_loss_ratio":0.1,"min_rate":5.615e-05,"max_rate":5.643145363408521e-05,"is_open":false,"open_timestamp":1517013300000.0,"close_timestamp":1517016600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.998560115190784,"open_date":"2018-01-27 00:45:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":5.556e-05,"close_rate":5.144e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4560,"profit_ratio":-0.07877175,"profit_abs":-7.415406767458598e-05,"exit_reason":"force_exit","initial_stop_loss_abs":5.0004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0004e-05,"stop_loss_ratio":0.1,"min_rate":5.144e-05,"max_rate":5.556e-05,"is_open":false,"open_timestamp":1517013900000.0,"close_timestamp":1517287500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014492751522789634,"open_date":"2018-01-27 02:30:00+00:00","close_date":"2018-01-27 11:25:00+00:00","open_rate":0.06900001,"close_rate":0.06934587471177944,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":535,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.062100009000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.062100009000000005,"stop_loss_ratio":0.1,"min_rate":0.06900001,"max_rate":0.06934587471177944,"is_open":false,"open_timestamp":1517020200000.0,"close_timestamp":1517052300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010582027378879436,"open_date":"2018-01-27 06:25:00+00:00","close_date":"2018-01-27 
07:05:00+00:00","open_rate":0.09449985,"close_rate":0.0949735334586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.085049865,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085049865,"stop_loss_ratio":0.1,"min_rate":0.09449985,"max_rate":0.0949735334586466,"is_open":false,"open_timestamp":1517034300000.0,"close_timestamp":1517036700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02434885085598385,"open_date":"2018-01-27 09:40:00+00:00","close_date":"2018-01-30 04:40:00+00:00","open_rate":0.0410697,"close_rate":0.03928809,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4020,"profit_ratio":-0.04815133,"profit_abs":-4.338015617352949e-05,"exit_reason":"force_exit","initial_stop_loss_abs":0.03696273,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03696273,"stop_loss_ratio":0.1,"min_rate":0.03928809,"max_rate":0.0410697,"is_open":false,"open_timestamp":1517046000000.0,"close_timestamp":1517287200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03508771929824561,"open_date":"2018-01-27 11:45:00+00:00","close_date":"2018-01-27 12:30:00+00:00","open_rate":0.0285,"close_rate":0.02864285714285714,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025650000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025650000000000003,"stop_loss_ratio":0.1,"min_rate":0.0285,"max_rate":0.02864285714285714,"is_open":false,"open_timestamp":1517053500000.0,"close_timestamp":1517056200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.034887307020861215,"open_date":"2018-01-27 12:35:00+00:00","close_date":"2018-01-27 15:25:00+00:00","open_rate":0.02866372,"close_rate":0.02880739779448621,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":170,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025797348,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025797348,"stop_loss_ratio":0.1,"min_rate":0.02866372,"max_rate":0.02880739779448621,"is_open":false,"open_timestamp":1517056500000.0,"close_timestamp":1517066700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010484268355332824,"open_date":"2018-01-27 15:50:00+00:00","close_date":"2018-01-27 16:50:00+00:00","open_rate":0.095381,"close_rate":0.09585910025062656,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0858429,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0858429,"stop_loss_ratio":0.1,"min_rate":0.095381,"max_rate":0.09585910025062656,"is_open":false,"open_timestamp":1517068200000.0,"close_timestamp":1517071800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014794886650455415,"open_date":"2018-01-27 17:05:00+00:00","close_date":"2018-01-27 
17:45:00+00:00","open_rate":0.06759092,"close_rate":0.06792972160401002,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060831828,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060831828,"stop_loss_ratio":0.1,"min_rate":0.06759092,"max_rate":0.06792972160401002,"is_open":false,"open_timestamp":1517072700000.0,"close_timestamp":1517075100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38684569885609726,"open_date":"2018-01-27 23:40:00+00:00","close_date":"2018-01-28 01:05:00+00:00","open_rate":0.00258501,"close_rate":0.002597967443609022,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002326509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326509,"stop_loss_ratio":0.1,"min_rate":0.00258501,"max_rate":0.002597967443609022,"is_open":false,"open_timestamp":1517096400000.0,"close_timestamp":1517101500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014928710926711672,"open_date":"2018-01-28 02:25:00+00:00","close_date":"2018-01-28 08:10:00+00:00","open_rate":0.06698502,"close_rate":0.0673207845112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":345,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060286518,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060286518,"stop_loss_ratio":0.1,"min_rate":0.06698502,"max_rate":0.0673207845112782,"is_open":false,"open_timestamp":1517106300000.0,"close_timestamp":1517127000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014767187899175548,"open_date":"2018-01-28 10:25:00+00:00","close_date":"2018-01-28 16:30:00+00:00","open_rate":0.0677177,"close_rate":0.06805713709273183,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":365,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06094593000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06094593000000001,"stop_loss_ratio":0.1,"min_rate":0.0677177,"max_rate":0.06805713709273183,"is_open":false,"open_timestamp":1517135100000.0,"close_timestamp":1517157000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":19.175455417066157,"open_date":"2018-01-28 20:35:00+00:00","close_date":"2018-01-28 21:35:00+00:00","open_rate":5.215e-05,"close_rate":5.2411403508771925e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6935e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6935e-05,"stop_loss_ratio":0.1,"min_rate":5.215e-05,"max_rate":5.2411403508771925e-05,"is_open":false,"open_timestamp":1517171700000.0,"close_timestamp":1517175300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.36521808998243305,"open_date":"2018-01-28 22:00:00+00:00","close_date":"2018-01-28 
22:30:00+00:00","open_rate":0.00273809,"close_rate":0.002779264285714285,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002464281,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002464281,"stop_loss_ratio":0.1,"min_rate":0.00273809,"max_rate":0.002779264285714285,"is_open":false,"open_timestamp":1517176800000.0,"close_timestamp":1517178600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3641236272539253,"open_date":"2018-01-29 00:00:00+00:00","close_date":"2018-01-29 00:30:00+00:00","open_rate":0.00274632,"close_rate":0.002787618045112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002471688,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002471688,"stop_loss_ratio":0.1,"min_rate":0.00274632,"max_rate":0.002787618045112782,"is_open":false,"open_timestamp":1517184000000.0,"close_timestamp":1517185800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.061634117689115045,"open_date":"2018-01-29 02:15:00+00:00","close_date":"2018-01-29 03:00:00+00:00","open_rate":0.01622478,"close_rate":0.016306107218045113,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014602302,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014602302,"stop_loss_ratio":0.1,"min_rate":0.01622478,"max_rate":0.016306107218045113,"is_open":false,"open_timestamp":1517192100000.0,"close_timestamp":1517194800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014492753623188404,"open_date":"2018-01-29 03:05:00+00:00","close_date":"2018-01-29 03:45:00+00:00","open_rate":0.069,"close_rate":0.06934586466165413,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06210000000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06210000000000001,"stop_loss_ratio":0.1,"min_rate":0.069,"max_rate":0.06934586466165413,"is_open":false,"open_timestamp":1517195100000.0,"close_timestamp":1517197500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.42204454597373,"open_date":"2018-01-29 05:20:00+00:00","close_date":"2018-01-29 06:55:00+00:00","open_rate":8.755e-05,"close_rate":8.798884711779448e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":95,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":7.879500000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.879500000000001e-05,"stop_loss_ratio":0.1,"min_rate":8.755e-05,"max_rate":8.798884711779448e-05,"is_open":false,"open_timestamp":1517203200000.0,"close_timestamp":1517208900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014650376815016871,"open_date":"2018-01-29 07:00:00+00:00","close_date":"2018-01-29 
19:25:00+00:00","open_rate":0.06825763,"close_rate":0.06859977350877192,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":745,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.061431867,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061431867,"stop_loss_ratio":0.1,"min_rate":0.06825763,"max_rate":0.06859977350877192,"is_open":false,"open_timestamp":1517209200000.0,"close_timestamp":1517253900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014894490408841846,"open_date":"2018-01-29 19:45:00+00:00","close_date":"2018-01-29 20:25:00+00:00","open_rate":0.06713892,"close_rate":0.06747545593984962,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060425028000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060425028000000006,"stop_loss_ratio":0.1,"min_rate":0.06713892,"max_rate":0.06747545593984962,"is_open":false,"open_timestamp":1517255100000.0,"close_timestamp":1517257500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.193194537721066,"open_date":"2018-01-29 23:30:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":8.934e-05,"close_rate":8.8e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":315,"profit_ratio":-0.0199116,"profit_abs":-1.4998880680546292e-05,"exit_reason":"force_exit","initial_stop_loss_abs":8.0406e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.0406e-05,"stop_loss_ratio":0.1,"min_rate":8.8e-05,"max_rate":8.934e-05,"is_open":false,"open_timestamp":1517268600000.0,"close_timestamp":1517287500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null}],"locks":[],"best_pair":{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.00010025062656641558,"profit_total":0.010025062656641558,"profit_total_pct":1.0,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},"worst_pair":{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":3.533834586465928e-05,"profit_total":0.003533834586465928,"profit_total_pct":0.35,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},"results_per_pair":[{"key":"XLM/BTC","trades":21,"profit_mean":0.0026243899999999994,"profit_mean_pct":0.2624389999999999,"profit_sum":0.05511218999999999,"profit_sum_pct":5.51,"profit_total_abs":0.00016065162907268006,"profit_total":0.016065162907268005,"profit_total_pct":1.61,"duration_avg":"3:21:00","wins":20,"draws":0,"losses":1},{"key":"ETC/BTC","trades":20,"profit_mean":0.0022568569999999997,"profit_mean_pct":0.22568569999999996,"profit_sum":0.04513713999999999,"profit_sum_pct":4.51,"profit_total_abs":0.00014561403508771753,"profit_total":0.014561403508771753,"profit_total_pct":1.46,"duration_avg":"1:45:00","wins":19,"draws":0,"losses":1},{"key":"ETH/BTC","trades":21,"profit_mean":0.0009500057142857142,"profit_mean_pct":0.09500057142857142,"profit_sum":0.01995012,"profit_sum_pct":2.0,"profit_total_abs":0.00012531328320801774,"profit_total":0.012531328320801774,"profit_total_pct":1.25,"duration_avg":"2:17:00","wins":21,"draws":0,"losses":0},{"key":"ADA/BTC","trades":29,"profit_mean":-0.0011598141379310352,"profit_mean_pct":-0.11598141379310352,"profit_sum
":-0.03363461000000002,"profit_sum_pct":-3.36,"profit_total_abs":0.00011156021803969656,"profit_total":0.011156021803969657,"profit_total_pct":1.12,"duration_avg":"5:35:00","wins":27,"draws":0,"losses":2},{"key":"TRX/BTC","trades":15,"profit_mean":0.0023467073333333323,"profit_mean_pct":0.23467073333333321,"profit_sum":0.035200609999999986,"profit_sum_pct":3.52,"profit_total_abs":0.00011056502909388873,"profit_total":0.011056502909388873,"profit_total_pct":1.11,"duration_avg":"2:28:00","wins":13,"draws":0,"losses":2},{"key":"DASH/BTC","trades":16,"profit_mean":0.0018703237499999997,"profit_mean_pct":0.18703237499999997,"profit_sum":0.029925179999999996,"profit_sum_pct":2.99,"profit_total_abs":0.0001102756892230564,"profit_total":0.01102756892230564,"profit_total_pct":1.1,"duration_avg":"3:03:00","wins":16,"draws":0,"losses":0},{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.00010025062656641558,"profit_total":0.010025062656641558,"profit_total_pct":1.0,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},{"key":"ZEC/BTC","trades":21,"profit_mean":-0.00039290904761904774,"profit_mean_pct":-0.03929090476190478,"profit_sum":-0.008251090000000003,"profit_sum_pct":-0.83,"profit_total_abs":9.697072101945111e-05,"profit_total":0.009697072101945111,"profit_total_pct":0.97,"duration_avg":"4:17:00","wins":20,"draws":0,"losses":1},{"key":"NXT/BTC","trades":12,"profit_mean":-0.0012261025000000006,"profit_mean_pct":-0.12261025000000006,"profit_sum":-0.014713230000000008,"profit_sum_pct":-1.47,"profit_total_abs":4.536340852130151e-05,"profit_total":0.004536340852130151,"profit_total_pct":0.45,"duration_avg":"0:57:00","wins":11,"draws":0,"losses":1},{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":3.533834586465928e-05,"profit_total":0.003533834586465928,"profit_total_pct":0.35,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"results_per_enter_tag":[{"key":"buy_tag","trades":1,"profit_mean":0.03990025,"profit_mean_pct":3.9900249999999997,"profit_sum":0.03990025,"profit_sum_pct":3.99,"profit_total_abs":4.5112781954887056e-05,"profit_total":0.004511278195488706,"profit_total_pct":0.45,"duration_avg":"0:15:00","wins":1,"draws":0,"losses":0},{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"exit_reason_summary":[{"exit_reason":"roi","trades":170,"wins":170,"draws":0,"losses":0,"profit_mean":0.005398268352941177,"profit_mean_pct":0.54,"profit_sum":0.91770562,"profit_sum_pct":91.77,"profit_total_abs":0.0017744360902255465,"profit_total":0.30590187333333335,"profit_total_pct":30.59},{"exit_reason":"stop_loss","trades":6,"wins":0,"draws":0,"losses":6,"profit_mean":-0.10448878000000002,"profit_mean_pct":-10.45,"profit_sum":-0.6269326800000001,"profit_sum_pct":-62.69,"profit_total_abs":-0.00060000000
00000003,"profit_total":-0.20897756000000003,"profit_total_pct":-20.9},{"exit_reason":"force_exit","trades":3,"wins":0,"draws":0,"losses":3,"profit_mean":-0.04894489333333333,"profit_mean_pct":-4.89,"profit_sum":-0.14683468,"profit_sum_pct":-14.68,"profit_total_abs":-0.00013253310452866177,"profit_total":-0.04894489333333333,"profit_total_pct":-4.89}],"left_open_trades":[{"key":"TRX/BTC","trades":1,"profit_mean":-0.0199116,"profit_mean_pct":-1.9911600000000003,"profit_sum":-0.0199116,"profit_sum_pct":-1.99,"profit_total_abs":-1.4998880680546292e-05,"profit_total":-0.0014998880680546292,"profit_total_pct":-0.15,"duration_avg":"5:15:00","wins":0,"draws":0,"losses":1},{"key":"ZEC/BTC","trades":1,"profit_mean":-0.04815133,"profit_mean_pct":-4.815133,"profit_sum":-0.04815133,"profit_sum_pct":-4.82,"profit_total_abs":-4.338015617352949e-05,"profit_total":-0.004338015617352949,"profit_total_pct":-0.43,"duration_avg":"2 days, 19:00:00","wins":0,"draws":0,"losses":1},{"key":"ADA/BTC","trades":1,"profit_mean":-0.07877175,"profit_mean_pct":-7.877175,"profit_sum":-0.07877175,"profit_sum_pct":-7.88,"profit_total_abs":-7.415406767458598e-05,"profit_total":-0.007415406767458598,"profit_total_pct":-0.74,"duration_avg":"3 days, 4:00:00","wins":0,"draws":0,"losses":1},{"key":"TOTAL","trades":3,"profit_mean":-0.04894489333333333,"profit_mean_pct":-4.894489333333333,"profit_sum":-0.14683468,"profit_sum_pct":-14.68,"profit_total_abs":-0.00013253310452866177,"profit_total":-0.013253310452866176,"profit_total_pct":-1.33,"duration_avg":"2 days, 1:25:00","wins":0,"draws":0,"losses":3}],"total_trades":179,"trade_count_long":179,"trade_count_short":0,"total_volume":0.17900000000000005,"avg_stake_amount":0.0010000000000000002,"profit_mean":0.0008041243575418989,"profit_median":0.0,"profit_total":0.10419029856968845,"profit_total_long":0.10419029856968845,"profit_total_short":0.0,"profit_total_abs":0.0010419029856968845,"profit_total_long_abs":0.0010419029856968845,"profit_total_short_abs":0.0,"cagr":5.712688499973264,"profit_factor":2.4223288739520954,"backtest_start":"2018-01-10 07:15:00","backtest_start_ts":1515568500000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":"2020-10-01 18:00:00+00:00","backtest_run_end_ts":"2020-10-01 
18:01:00+00:00","trades_per_day":9.42,"market_change":1.22,"pairlist":["TRX/BTC","ADA/BTC","XLM/BTC","ETH/BTC","XMR/BTC","ZEC/BTC","NXT/BTC","LTC/BTC","ETC/BTC","DASH/BTC"],"stake_amount":0.001,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":0.01,"dry_run_wallet":0.01,"final_balance":0.011041902985696884,"rejected_signals":0,"timedout_entry_orders":0,"timedout_exit_orders":0,"canceled_trade_entries":0,"canceled_entry_orders":0,"replaced_entry_orders":0,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timeframe_detail":"","timerange":"","enable_protections":false,"strategy_name":"StrategyTestV3","stoploss":0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":false,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.17955111999999998,"backtest_worst_day":-0.14683468,"backtest_best_day_abs":0.000245614,"backtest_worst_day_abs":-0.0001325331,"winning_days":19,"draw_days":0,"losing_days":2,"daily_profit":[["2018-01-10",0.000245614],["2018-01-11",0.0001055138],["2018-01-12",4.51128e-05],["2018-01-13",3.00752e-05],["2018-01-14",3.50877e-05],["2018-01-15",6.51629e-05],["2018-01-16",5.11278e-05],["2018-01-17",7.01754e-05],["2018-01-18",8.5213e-05],["2018-01-19",3.00752e-05],["2018-01-20",2.50627e-05],["2018-01-21",4.01003e-05],["2018-01-22",7.01754e-05],["2018-01-23",8.5213e-05],["2018-01-24",8.02005e-05],["2018-01-25",-4.48622e-05],["2018-01-26",4.01003e-05],["2018-01-27",4.01003e-05],["2018-01-28",3.50877e-05],["2018-01-29",4.01003e-05],["2018-01-30",-0.0001325331]],"wins":48,"losses":9,"draws":122,"holding_avg":"3:40:00","holding_avg_s":13200.0,"winner_holding_avg":"0:24:00","winner_holding_avg_s":1440.0,"loser_holding_avg":"1 day, 5:57:00","loser_holding_avg_s":107820.0,"max_drawdown":0.21142322000000008,"max_drawdown_account":0.018740312808228732,"max_relative_drawdown":0.018740312808228732,"max_drawdown_abs":0.0002000000000000001,"drawdown_start":"2018-01-16 19:30:00","drawdown_start_ts":1516131000000.0,"drawdown_end":"2018-01-16 22:25:00","drawdown_end_ts":1516141500000.0,"max_drawdown_low":0.0004721804511278108,"max_drawdown_high":0.0006721804511278109,"csum_min":0.010045112781954888,"csum_max":0.011069172932330812}},"strategy_comparison":[{"key":"StrategyTestV3","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9,"max_drawdown_account":0.018740312808228732,"max_drawdown_abs":"0.0002"}]} From 882e68c68b8fc0ec2d501d58a01a404c1b7d9084 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 26 Dec 2022 15:30:39 +0100 Subject: [PATCH 411/421] Rename backtest-result from new to "not new". 
--- tests/commands/test_commands.py | 2 +- tests/data/test_btanalysis.py | 16 ++++++++-------- tests/optimize/test_optimize_reports.py | 8 ++++---- tests/rpc/test_rpc_apiserver.py | 2 +- tests/test_plotting.py | 12 ++++++------ .../testdata/backtest_results/.last_result.json | 2 +- ...test-result_new.json => backtest-result.json} | 0 ...t_new.meta.json => backtest-result.meta.json} | 0 8 files changed, 21 insertions(+), 21 deletions(-) rename tests/testdata/backtest_results/{backtest-result_new.json => backtest-result.json} (100%) rename tests/testdata/backtest_results/{backtest-result_new.meta.json => backtest-result.meta.json} (100%) diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index a1d73f7ef..d568f48f6 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -1529,7 +1529,7 @@ def test_backtesting_show(mocker, testdatadir, capsys): args = [ "backtesting-show", "--export-filename", - f"{testdatadir / 'backtest_results/backtest-result_new.json'}", + f"{testdatadir / 'backtest_results/backtest-result.json'}", "--show-pair-list" ] pargs = get_args(args) diff --git a/tests/data/test_btanalysis.py b/tests/data/test_btanalysis.py index 95de6b53e..1cc1aa0c9 100644 --- a/tests/data/test_btanalysis.py +++ b/tests/data/test_btanalysis.py @@ -30,10 +30,10 @@ def test_get_latest_backtest_filename(testdatadir, mocker): testdir_bt = testdatadir / "backtest_results" res = get_latest_backtest_filename(testdir_bt) - assert res == 'backtest-result_new.json' + assert res == 'backtest-result.json' res = get_latest_backtest_filename(str(testdir_bt)) - assert res == 'backtest-result_new.json' + assert res == 'backtest-result.json' mocker.patch("freqtrade.data.btanalysis.json_load", return_value={}) @@ -81,7 +81,7 @@ def test_load_backtest_data_old_format(testdatadir, mocker): def test_load_backtest_data_new_format(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) assert isinstance(bt_data, DataFrame) assert set(bt_data.columns) == set(BT_DATA_COLUMNS) @@ -182,7 +182,7 @@ def test_extract_trades_of_period(testdatadir): def test_analyze_trade_parallelism(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) res = analyze_trade_parallelism(bt_data, "5m") @@ -256,7 +256,7 @@ def test_combine_dataframes_with_mean_no_data(testdatadir): def test_create_cum_profit(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) timerange = TimeRange.parse_timerange("20180110-20180112") @@ -272,7 +272,7 @@ def test_create_cum_profit(testdatadir): def test_create_cum_profit1(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) # Move close-time to "off" the candle, to make sure the logic still works bt_data['close_date'] = bt_data.loc[:, 'close_date'] + DateOffset(seconds=20) @@ -294,7 +294,7 @@ def test_create_cum_profit1(testdatadir): def test_calculate_max_drawdown(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data 
= load_backtest_data(filename) _, hdate, lowdate, hval, lval, drawdown = calculate_max_drawdown( bt_data, value_col="profit_abs") @@ -318,7 +318,7 @@ def test_calculate_max_drawdown(testdatadir): def test_calculate_csum(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) csum_min, csum_max = calculate_csum(bt_data) diff --git a/tests/optimize/test_optimize_reports.py b/tests/optimize/test_optimize_reports.py index 403075795..549202284 100644 --- a/tests/optimize/test_optimize_reports.py +++ b/tests/optimize/test_optimize_reports.py @@ -308,7 +308,7 @@ def test_generate_pair_metrics(): def test_generate_daily_stats(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) res = generate_daily_stats(bt_data) assert isinstance(res, dict) @@ -328,7 +328,7 @@ def test_generate_daily_stats(testdatadir): def test_generate_trading_stats(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) res = generate_trading_stats(bt_data) assert isinstance(res, dict) @@ -444,7 +444,7 @@ def test_generate_edge_table(): def test_generate_periodic_breakdown_stats(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename).to_dict(orient='records') res = generate_periodic_breakdown_stats(bt_data, 'day') @@ -472,7 +472,7 @@ def test__get_resample_from_period(): def test_show_sorted_pairlist(testdatadir, default_conf, capsys): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_stats(filename) default_conf['backtest_show_pair_list'] = True diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index aea8ea059..2a2a38196 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1709,7 +1709,7 @@ def test_api_backtest_history(botclient, mocker, testdatadir): mocker.patch('freqtrade.data.btanalysis._get_backtest_files', return_value=[ testdatadir / 'backtest_results/backtest-result_multistrat.json', - testdatadir / 'backtest_results/backtest-result_new.json' + testdatadir / 'backtest_results/backtest-result.json' ]) rc = client_get(client, f"{BASE_URI}/backtest/history") diff --git a/tests/test_plotting.py b/tests/test_plotting.py index 64089c4c6..7662ea7f1 100644 --- a/tests/test_plotting.py +++ b/tests/test_plotting.py @@ -46,7 +46,7 @@ def test_init_plotscript(default_conf, mocker, testdatadir): default_conf['trade_source'] = "file" default_conf['timeframe'] = "5m" default_conf["datadir"] = testdatadir - default_conf['exportfilename'] = testdatadir / "backtest-result_new.json" + default_conf['exportfilename'] = testdatadir / "backtest-result.json" supported_markets = ["TRX/BTC", "ADA/BTC"] ret = init_plotscript(default_conf, supported_markets) assert "ohlcv" in ret @@ -158,7 +158,7 @@ def test_plot_trades(testdatadir, caplog): assert fig == fig1 assert log_has("No trades found.", caplog) pair = "ADA/BTC" - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / 
"backtest_results/backtest-result.json" trades = load_backtest_data(filename) trades = trades.loc[trades['pair'] == pair] @@ -299,7 +299,7 @@ def test_generate_plot_file(mocker, caplog): def test_add_profit(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) timerange = TimeRange.parse_timerange("20180110-20180112") @@ -319,7 +319,7 @@ def test_add_profit(testdatadir): def test_generate_profit_graph(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_new.json" + filename = testdatadir / "backtest_results/backtest-result.json" trades = load_backtest_data(filename) timerange = TimeRange.parse_timerange("20180110-20180112") pairs = ["TRX/BTC", "XLM/BTC"] @@ -395,7 +395,7 @@ def test_load_and_plot_trades(default_conf, mocker, caplog, testdatadir): default_conf['trade_source'] = 'file' default_conf["datadir"] = testdatadir - default_conf['exportfilename'] = testdatadir / "backtest-result_new.json" + default_conf['exportfilename'] = testdatadir / "backtest-result.json" default_conf['indicators1'] = ["sma5", "ema10"] default_conf['indicators2'] = ["macd"] default_conf['pairs'] = ["ETH/BTC", "LTC/BTC"] @@ -466,7 +466,7 @@ def test_plot_profit(default_conf, mocker, testdatadir): match=r"No trades found, cannot generate Profit-plot.*"): plot_profit(default_conf) - default_conf['exportfilename'] = testdatadir / "backtest_results/backtest-result_new.json" + default_conf['exportfilename'] = testdatadir / "backtest_results/backtest-result.json" plot_profit(default_conf) diff --git a/tests/testdata/backtest_results/.last_result.json b/tests/testdata/backtest_results/.last_result.json index 98448e10f..7ebab4613 100644 --- a/tests/testdata/backtest_results/.last_result.json +++ b/tests/testdata/backtest_results/.last_result.json @@ -1 +1 @@ -{"latest_backtest":"backtest-result_new.json"} +{"latest_backtest":"backtest-result.json"} diff --git a/tests/testdata/backtest_results/backtest-result_new.json b/tests/testdata/backtest_results/backtest-result.json similarity index 100% rename from tests/testdata/backtest_results/backtest-result_new.json rename to tests/testdata/backtest_results/backtest-result.json diff --git a/tests/testdata/backtest_results/backtest-result_new.meta.json b/tests/testdata/backtest_results/backtest-result.meta.json similarity index 100% rename from tests/testdata/backtest_results/backtest-result_new.meta.json rename to tests/testdata/backtest_results/backtest-result.meta.json From 20901c833adaa272ce8d9802521188daac13acdd Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Tue, 27 Dec 2022 10:08:09 +0100 Subject: [PATCH 412/421] Improve `purge_old_models` explanation --- docs/freqai-parameter-table.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index d05ce80f3..72ee1e6b3 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -15,7 +15,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `identifier` | **Required.**
A unique ID for the current model. If models are saved to disk, the `identifier` allows for reloading specific pre-trained models/data.
**Datatype:** String. | `live_retrain_hours` | Frequency of retraining during dry/live runs.
**Datatype:** Float > 0.
Default: `0` (models retrain as often as possible). | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old.
**Datatype:** Positive integer.
Default: `0` (models never expire). -| `purge_old_models` | Delete obsolete models.
**Datatype:** Boolean.
Default: `False` (all historic models remain on disk).
+| `purge_old_models` | Delete all unused models during live runs (not relevant to backtesting). If set to `false` (not the default), dry/live runs will accumulate all unused models on disk.
**Datatype:** Boolean.
Default: `True`.
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing it directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows you to use the same model files for starting a dry/live instance with the same model `identifier`.
**Datatype:** Boolean.
Default: `False` (no models are saved). | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)).
**Datatype:** Positive integer. | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models.
**Datatype:** Boolean.
Default: `False`. From 6f2c3e2528bccafb1cb61f13f55a6af3b8767b30 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 27 Dec 2022 13:41:51 +0100 Subject: [PATCH 413/421] Split migration and persistence tests --- tests/persistence/test_migrations.py | 411 ++++++++++++++++++++++++++ tests/persistence/test_persistence.py | 403 +------------------------ 2 files changed, 413 insertions(+), 401 deletions(-) create mode 100644 tests/persistence/test_migrations.py diff --git a/tests/persistence/test_migrations.py b/tests/persistence/test_migrations.py new file mode 100644 index 000000000..1cd236005 --- /dev/null +++ b/tests/persistence/test_migrations.py @@ -0,0 +1,411 @@ +# pragma pylint: disable=missing-docstring, C0103 +import logging +from pathlib import Path +from unittest.mock import MagicMock + +import pytest +from sqlalchemy import create_engine, text + +from freqtrade.constants import DEFAULT_DB_PROD_URL +from freqtrade.enums import TradingMode +from freqtrade.exceptions import OperationalException +from freqtrade.persistence import Trade, init_db +from freqtrade.persistence.migrations import get_last_sequence_ids, set_sequence_ids +from freqtrade.persistence.models import PairLock +from tests.conftest import log_has + + +spot, margin, futures = TradingMode.SPOT, TradingMode.MARGIN, TradingMode.FUTURES + + +def test_init_create_session(default_conf): + # Check if init create a session + init_db(default_conf['db_url']) + assert hasattr(Trade, '_session') + assert 'scoped_session' in type(Trade._session).__name__ + + +def test_init_custom_db_url(default_conf, tmpdir): + # Update path to a value other than default, but still in-memory + filename = f"{tmpdir}/freqtrade2_test.sqlite" + assert not Path(filename).is_file() + + default_conf.update({'db_url': f'sqlite:///{filename}'}) + + init_db(default_conf['db_url']) + assert Path(filename).is_file() + r = Trade._session.execute(text("PRAGMA journal_mode")) + assert r.first() == ('wal',) + + +def test_init_invalid_db_url(): + # Update path to a value other than default, but still in-memory + with pytest.raises(OperationalException, match=r'.*no valid database URL*'): + init_db('unknown:///some.url') + + with pytest.raises(OperationalException, match=r'Bad db-url.*For in-memory database, pl.*'): + init_db('sqlite:///') + + +def test_init_prod_db(default_conf, mocker): + default_conf.update({'dry_run': False}) + default_conf.update({'db_url': DEFAULT_DB_PROD_URL}) + + create_engine_mock = mocker.patch('freqtrade.persistence.models.create_engine', MagicMock()) + + init_db(default_conf['db_url']) + assert create_engine_mock.call_count == 1 + assert create_engine_mock.mock_calls[0][1][0] == 'sqlite:///tradesv3.sqlite' + + +def test_init_dryrun_db(default_conf, tmpdir): + filename = f"{tmpdir}/freqtrade2_prod.sqlite" + assert not Path(filename).is_file() + default_conf.update({ + 'dry_run': True, + 'db_url': f'sqlite:///{filename}' + }) + + init_db(default_conf['db_url']) + assert Path(filename).is_file() + + +def test_migrate_new(mocker, default_conf, fee, caplog): + """ + Test Database migration (starting with new pairformat) + """ + caplog.set_level(logging.DEBUG) + amount = 103.223 + # Always create all columns apart from the last! 
+ create_table_old = """CREATE TABLE IF NOT EXISTS "trades" ( + id INTEGER NOT NULL, + exchange VARCHAR NOT NULL, + pair VARCHAR NOT NULL, + is_open BOOLEAN NOT NULL, + fee FLOAT NOT NULL, + open_rate FLOAT, + close_rate FLOAT, + close_profit FLOAT, + stake_amount FLOAT NOT NULL, + amount FLOAT, + open_date DATETIME NOT NULL, + close_date DATETIME, + open_order_id VARCHAR, + stop_loss FLOAT, + initial_stop_loss FLOAT, + max_rate FLOAT, + sell_reason VARCHAR, + strategy VARCHAR, + ticker_interval INTEGER, + stoploss_order_id VARCHAR, + PRIMARY KEY (id), + CHECK (is_open IN (0, 1)) + );""" + create_table_order = """CREATE TABLE orders ( + id INTEGER NOT NULL, + ft_trade_id INTEGER, + ft_order_side VARCHAR(25) NOT NULL, + ft_pair VARCHAR(25) NOT NULL, + ft_is_open BOOLEAN NOT NULL, + order_id VARCHAR(255) NOT NULL, + status VARCHAR(255), + symbol VARCHAR(25), + order_type VARCHAR(50), + side VARCHAR(25), + price FLOAT, + amount FLOAT, + filled FLOAT, + remaining FLOAT, + cost FLOAT, + order_date DATETIME, + order_filled_date DATETIME, + order_update_date DATETIME, + PRIMARY KEY (id) + );""" + insert_table_old = """INSERT INTO trades (exchange, pair, is_open, fee, + open_rate, stake_amount, amount, open_date, + stop_loss, initial_stop_loss, max_rate, ticker_interval, + open_order_id, stoploss_order_id) + VALUES ('binance', 'ETC/BTC', 1, {fee}, + 0.00258580, {stake}, {amount}, + '2019-11-28 12:44:24.000000', + 0.0, 0.0, 0.0, '5m', + 'buy_order', 'dry_stop_order_id222') + """.format(fee=fee.return_value, + stake=default_conf.get("stake_amount"), + amount=amount + ) + insert_orders = f""" + insert into orders ( + ft_trade_id, + ft_order_side, + ft_pair, + ft_is_open, + order_id, + status, + symbol, + order_type, + side, + price, + amount, + filled, + remaining, + cost) + values ( + 1, + 'buy', + 'ETC/BTC', + 0, + 'dry_buy_order', + 'closed', + 'ETC/BTC', + 'limit', + 'buy', + 0.00258580, + {amount}, + {amount}, + 0, + {amount * 0.00258580} + ), + ( + 1, + 'buy', + 'ETC/BTC', + 1, + 'dry_buy_order22', + 'canceled', + 'ETC/BTC', + 'limit', + 'buy', + 0.00258580, + {amount}, + {amount}, + 0, + {amount * 0.00258580} + ), + ( + 1, + 'stoploss', + 'ETC/BTC', + 1, + 'dry_stop_order_id11X', + 'canceled', + 'ETC/BTC', + 'limit', + 'sell', + 0.00258580, + {amount}, + {amount}, + 0, + {amount * 0.00258580} + ), + ( + 1, + 'stoploss', + 'ETC/BTC', + 1, + 'dry_stop_order_id222', + 'open', + 'ETC/BTC', + 'limit', + 'sell', + 0.00258580, + {amount}, + {amount}, + 0, + {amount * 0.00258580} + ) + """ + engine = create_engine('sqlite://') + mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) + + # Create table using the old format + with engine.begin() as connection: + connection.execute(text(create_table_old)) + connection.execute(text(create_table_order)) + connection.execute(text("create index ix_trades_is_open on trades(is_open)")) + connection.execute(text("create index ix_trades_pair on trades(pair)")) + connection.execute(text(insert_table_old)) + connection.execute(text(insert_orders)) + + # fake previous backup + connection.execute(text("create table trades_bak as select * from trades")) + + connection.execute(text("create table trades_bak1 as select * from trades")) + # Run init to test migration + init_db(default_conf['db_url']) + + assert len(Trade.query.filter(Trade.id == 1).all()) == 1 + trade = Trade.query.filter(Trade.id == 1).first() + assert trade.fee_open == fee.return_value + assert trade.fee_close == fee.return_value + assert trade.open_rate_requested is 
None + assert trade.close_rate_requested is None + assert trade.is_open == 1 + assert trade.amount == amount + assert trade.amount_requested == amount + assert trade.stake_amount == default_conf.get("stake_amount") + assert trade.pair == "ETC/BTC" + assert trade.exchange == "binance" + assert trade.max_rate == 0.0 + assert trade.min_rate is None + assert trade.stop_loss == 0.0 + assert trade.initial_stop_loss == 0.0 + assert trade.exit_reason is None + assert trade.strategy is None + assert trade.timeframe == '5m' + assert trade.stoploss_order_id == 'dry_stop_order_id222' + assert trade.stoploss_last_update is None + assert log_has("trying trades_bak1", caplog) + assert log_has("trying trades_bak2", caplog) + assert log_has("Running database migration for trades - backup: trades_bak2, orders_bak0", + caplog) + assert log_has("Database migration finished.", caplog) + assert pytest.approx(trade.open_trade_value) == trade._calc_open_trade_value( + trade.amount, trade.open_rate) + assert trade.close_profit_abs is None + + orders = trade.orders + assert len(orders) == 4 + assert orders[0].order_id == 'dry_buy_order' + assert orders[0].ft_order_side == 'buy' + + assert orders[-1].order_id == 'dry_stop_order_id222' + assert orders[-1].ft_order_side == 'stoploss' + assert orders[-1].ft_is_open is True + + assert orders[1].order_id == 'dry_buy_order22' + assert orders[1].ft_order_side == 'buy' + assert orders[1].ft_is_open is False + + assert orders[2].order_id == 'dry_stop_order_id11X' + assert orders[2].ft_order_side == 'stoploss' + assert orders[2].ft_is_open is False + + +def test_migrate_too_old(mocker, default_conf, fee, caplog): + """ + Test Database migration (starting with new pairformat) + """ + caplog.set_level(logging.DEBUG) + amount = 103.223 + create_table_old = """CREATE TABLE IF NOT EXISTS "trades" ( + id INTEGER NOT NULL, + exchange VARCHAR NOT NULL, + pair VARCHAR NOT NULL, + is_open BOOLEAN NOT NULL, + fee_open FLOAT NOT NULL, + fee_close FLOAT NOT NULL, + open_rate FLOAT, + close_rate FLOAT, + close_profit FLOAT, + stake_amount FLOAT NOT NULL, + amount FLOAT, + open_date DATETIME NOT NULL, + close_date DATETIME, + open_order_id VARCHAR, + PRIMARY KEY (id), + CHECK (is_open IN (0, 1)) + );""" + + insert_table_old = """INSERT INTO trades (exchange, pair, is_open, fee_open, fee_close, + open_rate, stake_amount, amount, open_date) + VALUES ('binance', 'ETC/BTC', 1, {fee}, {fee}, + 0.00258580, {stake}, {amount}, + '2019-11-28 12:44:24.000000') + """.format(fee=fee.return_value, + stake=default_conf.get("stake_amount"), + amount=amount + ) + engine = create_engine('sqlite://') + mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) + + # Create table using the old format + with engine.begin() as connection: + connection.execute(text(create_table_old)) + connection.execute(text(insert_table_old)) + + # Run init to test migration + with pytest.raises(OperationalException, match=r'Your database seems to be very old'): + init_db(default_conf['db_url']) + + +def test_migrate_get_last_sequence_ids(): + engine = MagicMock() + engine.begin = MagicMock() + engine.name = 'postgresql' + get_last_sequence_ids(engine, 'trades_bak', 'orders_bak') + + assert engine.begin.call_count == 2 + engine.reset_mock() + engine.begin.reset_mock() + + engine.name = 'somethingelse' + get_last_sequence_ids(engine, 'trades_bak', 'orders_bak') + + assert engine.begin.call_count == 0 + + +def test_migrate_set_sequence_ids(): + engine = MagicMock() + engine.begin = MagicMock() + 
engine.name = 'postgresql' + set_sequence_ids(engine, 22, 55, 5) + + assert engine.begin.call_count == 1 + engine.reset_mock() + engine.begin.reset_mock() + + engine.name = 'somethingelse' + set_sequence_ids(engine, 22, 55, 6) + + assert engine.begin.call_count == 0 + + +def test_migrate_pairlocks(mocker, default_conf, fee, caplog): + """ + Test Database migration (starting with new pairformat) + """ + caplog.set_level(logging.DEBUG) + # Always create all columns apart from the last! + create_table_old = """CREATE TABLE pairlocks ( + id INTEGER NOT NULL, + pair VARCHAR(25) NOT NULL, + reason VARCHAR(255), + lock_time DATETIME NOT NULL, + lock_end_time DATETIME NOT NULL, + active BOOLEAN NOT NULL, + PRIMARY KEY (id) + ) + """ + create_index1 = "CREATE INDEX ix_pairlocks_pair ON pairlocks (pair)" + create_index2 = "CREATE INDEX ix_pairlocks_lock_end_time ON pairlocks (lock_end_time)" + create_index3 = "CREATE INDEX ix_pairlocks_active ON pairlocks (active)" + insert_table_old = """INSERT INTO pairlocks ( + id, pair, reason, lock_time, lock_end_time, active) + VALUES (1, 'ETH/BTC', 'Auto lock', '2021-07-12 18:41:03', '2021-07-11 18:45:00', 1) + """ + insert_table_old2 = """INSERT INTO pairlocks ( + id, pair, reason, lock_time, lock_end_time, active) + VALUES (2, '*', 'Lock all', '2021-07-12 18:41:03', '2021-07-12 19:00:00', 1) + """ + engine = create_engine('sqlite://') + mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) + # Create table using the old format + with engine.begin() as connection: + connection.execute(text(create_table_old)) + + connection.execute(text(insert_table_old)) + connection.execute(text(insert_table_old2)) + connection.execute(text(create_index1)) + connection.execute(text(create_index2)) + connection.execute(text(create_index3)) + + init_db(default_conf['db_url']) + + assert len(PairLock.query.all()) == 2 + assert len(PairLock.query.filter(PairLock.pair == '*').all()) == 1 + pairlocks = PairLock.query.filter(PairLock.pair == 'ETH/BTC').all() + assert len(pairlocks) == 1 + pairlocks[0].pair == 'ETH/BTC' + pairlocks[0].side == '*' diff --git a/tests/persistence/test_persistence.py b/tests/persistence/test_persistence.py index fbb639d50..984f85c0d 100644 --- a/tests/persistence/test_persistence.py +++ b/tests/persistence/test_persistence.py @@ -1,78 +1,20 @@ # pragma pylint: disable=missing-docstring, C0103 -import logging from datetime import datetime, timedelta, timezone -from pathlib import Path from types import FunctionType -from unittest.mock import MagicMock import arrow import pytest -from sqlalchemy import create_engine, text -from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_DB_PROD_URL +from freqtrade.constants import DATETIME_PRINT_FORMAT from freqtrade.enums import TradingMode -from freqtrade.exceptions import DependencyException, OperationalException +from freqtrade.exceptions import DependencyException from freqtrade.persistence import LocalTrade, Order, Trade, init_db -from freqtrade.persistence.migrations import get_last_sequence_ids, set_sequence_ids -from freqtrade.persistence.models import PairLock from tests.conftest import create_mock_trades, create_mock_trades_with_leverage, log_has, log_has_re spot, margin, futures = TradingMode.SPOT, TradingMode.MARGIN, TradingMode.FUTURES -def test_init_create_session(default_conf): - # Check if init create a session - init_db(default_conf['db_url']) - assert hasattr(Trade, '_session') - assert 'scoped_session' in type(Trade._session).__name__ - - -def 
test_init_custom_db_url(default_conf, tmpdir): - # Update path to a value other than default, but still in-memory - filename = f"{tmpdir}/freqtrade2_test.sqlite" - assert not Path(filename).is_file() - - default_conf.update({'db_url': f'sqlite:///{filename}'}) - - init_db(default_conf['db_url']) - assert Path(filename).is_file() - r = Trade._session.execute(text("PRAGMA journal_mode")) - assert r.first() == ('wal',) - - -def test_init_invalid_db_url(): - # Update path to a value other than default, but still in-memory - with pytest.raises(OperationalException, match=r'.*no valid database URL*'): - init_db('unknown:///some.url') - - with pytest.raises(OperationalException, match=r'Bad db-url.*For in-memory database, pl.*'): - init_db('sqlite:///') - - -def test_init_prod_db(default_conf, mocker): - default_conf.update({'dry_run': False}) - default_conf.update({'db_url': DEFAULT_DB_PROD_URL}) - - create_engine_mock = mocker.patch('freqtrade.persistence.models.create_engine', MagicMock()) - - init_db(default_conf['db_url']) - assert create_engine_mock.call_count == 1 - assert create_engine_mock.mock_calls[0][1][0] == 'sqlite:///tradesv3.sqlite' - - -def test_init_dryrun_db(default_conf, tmpdir): - filename = f"{tmpdir}/freqtrade2_prod.sqlite" - assert not Path(filename).is_file() - default_conf.update({ - 'dry_run': True, - 'db_url': f'sqlite:///{filename}' - }) - - init_db(default_conf['db_url']) - assert Path(filename).is_file() - - @pytest.mark.parametrize('is_short', [False, True]) @pytest.mark.usefixtures("init_persistence") def test_enter_exit_side(fee, is_short): @@ -1204,347 +1146,6 @@ def test_calc_profit( trade.open_rate)) == round(profit_ratio, 8) -def test_migrate_new(mocker, default_conf, fee, caplog): - """ - Test Database migration (starting with new pairformat) - """ - caplog.set_level(logging.DEBUG) - amount = 103.223 - # Always create all columns apart from the last! 
- create_table_old = """CREATE TABLE IF NOT EXISTS "trades" ( - id INTEGER NOT NULL, - exchange VARCHAR NOT NULL, - pair VARCHAR NOT NULL, - is_open BOOLEAN NOT NULL, - fee FLOAT NOT NULL, - open_rate FLOAT, - close_rate FLOAT, - close_profit FLOAT, - stake_amount FLOAT NOT NULL, - amount FLOAT, - open_date DATETIME NOT NULL, - close_date DATETIME, - open_order_id VARCHAR, - stop_loss FLOAT, - initial_stop_loss FLOAT, - max_rate FLOAT, - sell_reason VARCHAR, - strategy VARCHAR, - ticker_interval INTEGER, - stoploss_order_id VARCHAR, - PRIMARY KEY (id), - CHECK (is_open IN (0, 1)) - );""" - create_table_order = """CREATE TABLE orders ( - id INTEGER NOT NULL, - ft_trade_id INTEGER, - ft_order_side VARCHAR(25) NOT NULL, - ft_pair VARCHAR(25) NOT NULL, - ft_is_open BOOLEAN NOT NULL, - order_id VARCHAR(255) NOT NULL, - status VARCHAR(255), - symbol VARCHAR(25), - order_type VARCHAR(50), - side VARCHAR(25), - price FLOAT, - amount FLOAT, - filled FLOAT, - remaining FLOAT, - cost FLOAT, - order_date DATETIME, - order_filled_date DATETIME, - order_update_date DATETIME, - PRIMARY KEY (id) - );""" - insert_table_old = """INSERT INTO trades (exchange, pair, is_open, fee, - open_rate, stake_amount, amount, open_date, - stop_loss, initial_stop_loss, max_rate, ticker_interval, - open_order_id, stoploss_order_id) - VALUES ('binance', 'ETC/BTC', 1, {fee}, - 0.00258580, {stake}, {amount}, - '2019-11-28 12:44:24.000000', - 0.0, 0.0, 0.0, '5m', - 'buy_order', 'dry_stop_order_id222') - """.format(fee=fee.return_value, - stake=default_conf.get("stake_amount"), - amount=amount - ) - insert_orders = f""" - insert into orders ( - ft_trade_id, - ft_order_side, - ft_pair, - ft_is_open, - order_id, - status, - symbol, - order_type, - side, - price, - amount, - filled, - remaining, - cost) - values ( - 1, - 'buy', - 'ETC/BTC', - 0, - 'dry_buy_order', - 'closed', - 'ETC/BTC', - 'limit', - 'buy', - 0.00258580, - {amount}, - {amount}, - 0, - {amount * 0.00258580} - ), - ( - 1, - 'buy', - 'ETC/BTC', - 1, - 'dry_buy_order22', - 'canceled', - 'ETC/BTC', - 'limit', - 'buy', - 0.00258580, - {amount}, - {amount}, - 0, - {amount * 0.00258580} - ), - ( - 1, - 'stoploss', - 'ETC/BTC', - 1, - 'dry_stop_order_id11X', - 'canceled', - 'ETC/BTC', - 'limit', - 'sell', - 0.00258580, - {amount}, - {amount}, - 0, - {amount * 0.00258580} - ), - ( - 1, - 'stoploss', - 'ETC/BTC', - 1, - 'dry_stop_order_id222', - 'open', - 'ETC/BTC', - 'limit', - 'sell', - 0.00258580, - {amount}, - {amount}, - 0, - {amount * 0.00258580} - ) - """ - engine = create_engine('sqlite://') - mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) - - # Create table using the old format - with engine.begin() as connection: - connection.execute(text(create_table_old)) - connection.execute(text(create_table_order)) - connection.execute(text("create index ix_trades_is_open on trades(is_open)")) - connection.execute(text("create index ix_trades_pair on trades(pair)")) - connection.execute(text(insert_table_old)) - connection.execute(text(insert_orders)) - - # fake previous backup - connection.execute(text("create table trades_bak as select * from trades")) - - connection.execute(text("create table trades_bak1 as select * from trades")) - # Run init to test migration - init_db(default_conf['db_url']) - - assert len(Trade.query.filter(Trade.id == 1).all()) == 1 - trade = Trade.query.filter(Trade.id == 1).first() - assert trade.fee_open == fee.return_value - assert trade.fee_close == fee.return_value - assert trade.open_rate_requested is 
None - assert trade.close_rate_requested is None - assert trade.is_open == 1 - assert trade.amount == amount - assert trade.amount_requested == amount - assert trade.stake_amount == default_conf.get("stake_amount") - assert trade.pair == "ETC/BTC" - assert trade.exchange == "binance" - assert trade.max_rate == 0.0 - assert trade.min_rate is None - assert trade.stop_loss == 0.0 - assert trade.initial_stop_loss == 0.0 - assert trade.exit_reason is None - assert trade.strategy is None - assert trade.timeframe == '5m' - assert trade.stoploss_order_id == 'dry_stop_order_id222' - assert trade.stoploss_last_update is None - assert log_has("trying trades_bak1", caplog) - assert log_has("trying trades_bak2", caplog) - assert log_has("Running database migration for trades - backup: trades_bak2, orders_bak0", - caplog) - assert log_has("Database migration finished.", caplog) - assert pytest.approx(trade.open_trade_value) == trade._calc_open_trade_value( - trade.amount, trade.open_rate) - assert trade.close_profit_abs is None - - orders = trade.orders - assert len(orders) == 4 - assert orders[0].order_id == 'dry_buy_order' - assert orders[0].ft_order_side == 'buy' - - assert orders[-1].order_id == 'dry_stop_order_id222' - assert orders[-1].ft_order_side == 'stoploss' - assert orders[-1].ft_is_open is True - - assert orders[1].order_id == 'dry_buy_order22' - assert orders[1].ft_order_side == 'buy' - assert orders[1].ft_is_open is False - - assert orders[2].order_id == 'dry_stop_order_id11X' - assert orders[2].ft_order_side == 'stoploss' - assert orders[2].ft_is_open is False - - -def test_migrate_too_old(mocker, default_conf, fee, caplog): - """ - Test Database migration (starting with new pairformat) - """ - caplog.set_level(logging.DEBUG) - amount = 103.223 - create_table_old = """CREATE TABLE IF NOT EXISTS "trades" ( - id INTEGER NOT NULL, - exchange VARCHAR NOT NULL, - pair VARCHAR NOT NULL, - is_open BOOLEAN NOT NULL, - fee_open FLOAT NOT NULL, - fee_close FLOAT NOT NULL, - open_rate FLOAT, - close_rate FLOAT, - close_profit FLOAT, - stake_amount FLOAT NOT NULL, - amount FLOAT, - open_date DATETIME NOT NULL, - close_date DATETIME, - open_order_id VARCHAR, - PRIMARY KEY (id), - CHECK (is_open IN (0, 1)) - );""" - - insert_table_old = """INSERT INTO trades (exchange, pair, is_open, fee_open, fee_close, - open_rate, stake_amount, amount, open_date) - VALUES ('binance', 'ETC/BTC', 1, {fee}, {fee}, - 0.00258580, {stake}, {amount}, - '2019-11-28 12:44:24.000000') - """.format(fee=fee.return_value, - stake=default_conf.get("stake_amount"), - amount=amount - ) - engine = create_engine('sqlite://') - mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) - - # Create table using the old format - with engine.begin() as connection: - connection.execute(text(create_table_old)) - connection.execute(text(insert_table_old)) - - # Run init to test migration - with pytest.raises(OperationalException, match=r'Your database seems to be very old'): - init_db(default_conf['db_url']) - - -def test_migrate_get_last_sequence_ids(): - engine = MagicMock() - engine.begin = MagicMock() - engine.name = 'postgresql' - get_last_sequence_ids(engine, 'trades_bak', 'orders_bak') - - assert engine.begin.call_count == 2 - engine.reset_mock() - engine.begin.reset_mock() - - engine.name = 'somethingelse' - get_last_sequence_ids(engine, 'trades_bak', 'orders_bak') - - assert engine.begin.call_count == 0 - - -def test_migrate_set_sequence_ids(): - engine = MagicMock() - engine.begin = MagicMock() - 
engine.name = 'postgresql' - set_sequence_ids(engine, 22, 55, 5) - - assert engine.begin.call_count == 1 - engine.reset_mock() - engine.begin.reset_mock() - - engine.name = 'somethingelse' - set_sequence_ids(engine, 22, 55, 6) - - assert engine.begin.call_count == 0 - - -def test_migrate_pairlocks(mocker, default_conf, fee, caplog): - """ - Test Database migration (starting with new pairformat) - """ - caplog.set_level(logging.DEBUG) - # Always create all columns apart from the last! - create_table_old = """CREATE TABLE pairlocks ( - id INTEGER NOT NULL, - pair VARCHAR(25) NOT NULL, - reason VARCHAR(255), - lock_time DATETIME NOT NULL, - lock_end_time DATETIME NOT NULL, - active BOOLEAN NOT NULL, - PRIMARY KEY (id) - ) - """ - create_index1 = "CREATE INDEX ix_pairlocks_pair ON pairlocks (pair)" - create_index2 = "CREATE INDEX ix_pairlocks_lock_end_time ON pairlocks (lock_end_time)" - create_index3 = "CREATE INDEX ix_pairlocks_active ON pairlocks (active)" - insert_table_old = """INSERT INTO pairlocks ( - id, pair, reason, lock_time, lock_end_time, active) - VALUES (1, 'ETH/BTC', 'Auto lock', '2021-07-12 18:41:03', '2021-07-11 18:45:00', 1) - """ - insert_table_old2 = """INSERT INTO pairlocks ( - id, pair, reason, lock_time, lock_end_time, active) - VALUES (2, '*', 'Lock all', '2021-07-12 18:41:03', '2021-07-12 19:00:00', 1) - """ - engine = create_engine('sqlite://') - mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) - # Create table using the old format - with engine.begin() as connection: - connection.execute(text(create_table_old)) - - connection.execute(text(insert_table_old)) - connection.execute(text(insert_table_old2)) - connection.execute(text(create_index1)) - connection.execute(text(create_index2)) - connection.execute(text(create_index3)) - - init_db(default_conf['db_url']) - - assert len(PairLock.query.all()) == 2 - assert len(PairLock.query.filter(PairLock.pair == '*').all()) == 1 - pairlocks = PairLock.query.filter(PairLock.pair == 'ETH/BTC').all() - assert len(pairlocks) == 1 - pairlocks[0].pair == 'ETH/BTC' - pairlocks[0].side == '*' - - def test_adjust_stop_loss(fee): trade = Trade( pair='ADA/USDT', From 55001bf321db562fd6592dcd5e8612835033cc1d Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 27 Dec 2022 13:42:56 +0100 Subject: [PATCH 414/421] Keep max_stake_amount (only relevant for DCA orders). --- freqtrade/persistence/migrations.py | 15 ++++++++------- freqtrade/persistence/trade_model.py | 3 +++ tests/persistence/test_migrations.py | 1 + 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/freqtrade/persistence/migrations.py b/freqtrade/persistence/migrations.py index edbcd6be3..44a6756d1 100644 --- a/freqtrade/persistence/migrations.py +++ b/freqtrade/persistence/migrations.py @@ -109,11 +109,10 @@ def migrate_trades_and_orders_table( else: is_short = get_column_def(cols, 'is_short', '0') - # Margin Properties + # Futures Properties interest_rate = get_column_def(cols, 'interest_rate', '0.0') - - # Futures properties funding_fees = get_column_def(cols, 'funding_fees', '0.0') + max_stake_amount = get_column_def(cols, 'max_stake_amount', 'stake_amount') # If ticker-interval existed use that, else null. 
if has_column(cols, 'ticker_interval'): @@ -162,7 +161,8 @@ def migrate_trades_and_orders_table( timeframe, open_trade_value, close_profit_abs, trading_mode, leverage, liquidation_price, is_short, interest_rate, funding_fees, realized_profit, - amount_precision, price_precision, precision_mode, contract_size + amount_precision, price_precision, precision_mode, contract_size, + max_stake_amount ) select id, lower(exchange), pair, {base_currency} base_currency, {stake_currency} stake_currency, @@ -190,7 +190,8 @@ def migrate_trades_and_orders_table( {is_short} is_short, {interest_rate} interest_rate, {funding_fees} funding_fees, {realized_profit} realized_profit, {amount_precision} amount_precision, {price_precision} price_precision, - {precision_mode} precision_mode, {contract_size} contract_size + {precision_mode} precision_mode, {contract_size} contract_size, + {max_stake_amount} max_stake_amount from {trade_back_name} """)) @@ -310,8 +311,8 @@ def check_migrate(engine, decl_base, previous_tables) -> None: # if ('orders' not in previous_tables # or not has_column(cols_orders, 'funding_fee')): migrating = False - # if not has_column(cols_trades, 'contract_size'): - if not has_column(cols_orders, 'funding_fee'): + # if not has_column(cols_orders, 'funding_fee'): + if not has_column(cols_trades, 'max_stake_amount'): migrating = True logger.info(f"Running database migration for trades - " f"backup: {table_back_name}, {order_table_bak_name}") diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py index 186a1e584..ad3f9e3b9 100644 --- a/freqtrade/persistence/trade_model.py +++ b/freqtrade/persistence/trade_model.py @@ -293,6 +293,7 @@ class LocalTrade(): close_profit: Optional[float] = None close_profit_abs: Optional[float] = None stake_amount: float = 0.0 + max_stake_amount: float = 0.0 amount: float = 0.0 amount_requested: Optional[float] = None open_date: datetime @@ -918,6 +919,7 @@ class LocalTrade(): else: total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price) self.funding_fees = funding_fees + self.max_stake_amount = total_stake if close_profit: self.close_profit = close_profit @@ -1169,6 +1171,7 @@ class Trade(_DECL_BASE, LocalTrade): close_profit = Column(Float) close_profit_abs = Column(Float) stake_amount = Column(Float, nullable=False) + max_stake_amount = Column(Float) amount = Column(Float) amount_requested = Column(Float) open_date = Column(DateTime, nullable=False, default=datetime.utcnow) diff --git a/tests/persistence/test_migrations.py b/tests/persistence/test_migrations.py index 1cd236005..2a6959d58 100644 --- a/tests/persistence/test_migrations.py +++ b/tests/persistence/test_migrations.py @@ -264,6 +264,7 @@ def test_migrate_new(mocker, default_conf, fee, caplog): assert pytest.approx(trade.open_trade_value) == trade._calc_open_trade_value( trade.amount, trade.open_rate) assert trade.close_profit_abs is None + assert trade.stake_amount == trade.max_stake_amount orders = trade.orders assert len(orders) == 4 From cb66663fd2505c1410280636d57c3b9b504ef2f8 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 27 Dec 2022 13:46:27 +0100 Subject: [PATCH 415/421] show max_stake_amount in API --- freqtrade/persistence/trade_model.py | 1 + freqtrade/rpc/api_server/api_schemas.py | 1 + tests/persistence/test_persistence.py | 2 ++ tests/rpc/test_rpc.py | 1 + tests/rpc/test_rpc_apiserver.py | 2 ++ 5 files changed, 7 insertions(+) diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py index 
ad3f9e3b9..8d2c3f10e 100644 --- a/freqtrade/persistence/trade_model.py +++ b/freqtrade/persistence/trade_model.py @@ -470,6 +470,7 @@ class LocalTrade(): 'amount': round(self.amount, 8), 'amount_requested': round(self.amount_requested, 8) if self.amount_requested else None, 'stake_amount': round(self.stake_amount, 8), + 'max_stake_amount': round(self.max_stake_amount, 8) if self.max_stake_amount else None, 'strategy': self.strategy, 'buy_tag': self.enter_tag, 'enter_tag': self.enter_tag, diff --git a/freqtrade/rpc/api_server/api_schemas.py b/freqtrade/rpc/api_server/api_schemas.py index 17dff222d..59018aa50 100644 --- a/freqtrade/rpc/api_server/api_schemas.py +++ b/freqtrade/rpc/api_server/api_schemas.py @@ -217,6 +217,7 @@ class TradeSchema(BaseModel): amount: float amount_requested: float stake_amount: float + max_stake_amount: Optional[float] strategy: str buy_tag: Optional[str] # Deprecated enter_tag: Optional[str] diff --git a/tests/persistence/test_persistence.py b/tests/persistence/test_persistence.py index 984f85c0d..499fefce5 100644 --- a/tests/persistence/test_persistence.py +++ b/tests/persistence/test_persistence.py @@ -1359,6 +1359,7 @@ def test_to_json(fee): 'amount': 123.0, 'amount_requested': 123.0, 'stake_amount': 0.001, + 'max_stake_amount': None, 'trade_duration': None, 'trade_duration_s': None, 'realized_profit': 0.0, @@ -1427,6 +1428,7 @@ def test_to_json(fee): 'amount': 100.0, 'amount_requested': 101.0, 'stake_amount': 0.001, + 'max_stake_amount': None, 'trade_duration': 60, 'trade_duration_s': 3600, 'stop_loss_abs': None, diff --git a/tests/rpc/test_rpc.py b/tests/rpc/test_rpc.py index 24b5f1cbe..fd04e5c85 100644 --- a/tests/rpc/test_rpc.py +++ b/tests/rpc/test_rpc.py @@ -64,6 +64,7 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None: 'amount': 91.07468123, 'amount_requested': 91.07468124, 'stake_amount': 0.001, + 'max_stake_amount': ANY, 'trade_duration': None, 'trade_duration_s': None, 'close_profit': None, diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 2a2a38196..16e2a6737 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -985,6 +985,7 @@ def test_api_status(botclient, mocker, ticker, fee, markets, is_short, 'base_currency': 'ETH', 'quote_currency': 'BTC', 'stake_amount': 0.001, + 'max_stake_amount': ANY, 'stop_loss_abs': ANY, 'stop_loss_pct': ANY, 'stop_loss_ratio': ANY, @@ -1188,6 +1189,7 @@ def test_api_force_entry(botclient, mocker, fee, endpoint): 'base_currency': 'ETH', 'quote_currency': 'BTC', 'stake_amount': 1, + 'max_stake_amount': ANY, 'stop_loss_abs': None, 'stop_loss_pct': None, 'stop_loss_ratio': None, From 62c4675e295e178320550c50d9fbf5126ca8b23f Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 27 Dec 2022 13:55:46 +0100 Subject: [PATCH 416/421] Remove some deprecated fields from the API --- freqtrade/persistence/trade_model.py | 2 -- freqtrade/rpc/api_server/api_schemas.py | 2 -- tests/persistence/test_persistence.py | 7 +------ tests/rpc/test_rpc.py | 2 -- tests/rpc/test_rpc_apiserver.py | 4 ---- 5 files changed, 1 insertion(+), 16 deletions(-) diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py index 8d2c3f10e..e954fd263 100644 --- a/freqtrade/persistence/trade_model.py +++ b/freqtrade/persistence/trade_model.py @@ -472,7 +472,6 @@ class LocalTrade(): 'stake_amount': round(self.stake_amount, 8), 'max_stake_amount': round(self.max_stake_amount, 8) if self.max_stake_amount else None, 'strategy': self.strategy, - 
'buy_tag': self.enter_tag, 'enter_tag': self.enter_tag, 'timeframe': self.timeframe, @@ -509,7 +508,6 @@ class LocalTrade(): 'profit_pct': round(self.close_profit * 100, 2) if self.close_profit else None, 'profit_abs': self.close_profit_abs, - 'sell_reason': self.exit_reason, # Deprecated 'exit_reason': self.exit_reason, 'exit_order_status': self.exit_order_status, 'stop_loss_abs': self.stop_loss, diff --git a/freqtrade/rpc/api_server/api_schemas.py b/freqtrade/rpc/api_server/api_schemas.py index 59018aa50..404d64d16 100644 --- a/freqtrade/rpc/api_server/api_schemas.py +++ b/freqtrade/rpc/api_server/api_schemas.py @@ -219,7 +219,6 @@ class TradeSchema(BaseModel): stake_amount: float max_stake_amount: Optional[float] strategy: str - buy_tag: Optional[str] # Deprecated enter_tag: Optional[str] timeframe: int fee_open: Optional[float] @@ -244,7 +243,6 @@ class TradeSchema(BaseModel): profit_pct: Optional[float] profit_abs: Optional[float] profit_fiat: Optional[float] - sell_reason: Optional[str] # Deprecated exit_reason: Optional[str] exit_order_status: Optional[str] stop_loss_abs: Optional[float] diff --git a/tests/persistence/test_persistence.py b/tests/persistence/test_persistence.py index 499fefce5..830d84288 100644 --- a/tests/persistence/test_persistence.py +++ b/tests/persistence/test_persistence.py @@ -258,8 +258,7 @@ def test_interest(fee, exchange, is_short, lev, minutes, rate, interest, (True, 3.0, 30.0, margin), ]) @pytest.mark.usefixtures("init_persistence") -def test_borrowed(limit_buy_order_usdt, limit_sell_order_usdt, fee, - caplog, is_short, lev, borrowed, trading_mode): +def test_borrowed(fee, is_short, lev, borrowed, trading_mode): """ 10 minute limit trade on Binance/Kraken at 1x, 3x leverage fee: 0.25% quote @@ -1369,7 +1368,6 @@ def test_to_json(fee): 'profit_ratio': None, 'profit_pct': None, 'profit_abs': None, - 'sell_reason': None, 'exit_reason': None, 'exit_order_status': None, 'stop_loss_abs': None, @@ -1384,7 +1382,6 @@ def test_to_json(fee): 'min_rate': None, 'max_rate': None, 'strategy': None, - 'buy_tag': None, 'enter_tag': None, 'timeframe': None, 'exchange': 'binance', @@ -1460,11 +1457,9 @@ def test_to_json(fee): 'open_order_id': None, 'open_rate_requested': None, 'open_trade_value': 12.33075, - 'sell_reason': None, 'exit_reason': None, 'exit_order_status': None, 'strategy': None, - 'buy_tag': 'buys_signal_001', 'enter_tag': 'buys_signal_001', 'timeframe': None, 'exchange': 'binance', diff --git a/tests/rpc/test_rpc.py b/tests/rpc/test_rpc.py index fd04e5c85..4871d9b24 100644 --- a/tests/rpc/test_rpc.py +++ b/tests/rpc/test_rpc.py @@ -46,13 +46,11 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None: 'open_rate_requested': ANY, 'open_trade_value': 0.0010025, 'close_rate_requested': ANY, - 'sell_reason': ANY, 'exit_reason': ANY, 'exit_order_status': ANY, 'min_rate': ANY, 'max_rate': ANY, 'strategy': ANY, - 'buy_tag': ANY, 'enter_tag': ANY, 'timeframe': 5, 'open_order_id': ANY, diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 16e2a6737..c130e9373 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1015,11 +1015,9 @@ def test_api_status(botclient, mocker, ticker, fee, markets, is_short, 'open_order_id': open_order_id, 'open_rate_requested': ANY, 'open_trade_value': open_trade_value, - 'sell_reason': None, 'exit_reason': None, 'exit_order_status': None, 'strategy': CURRENT_TEST_STRATEGY, - 'buy_tag': None, 'enter_tag': None, 'timeframe': 5, 'exchange': 'binance', @@ -1220,11 
+1218,9 @@ def test_api_force_entry(botclient, mocker, fee, endpoint): 'open_order_id': '123456', 'open_rate_requested': None, 'open_trade_value': 0.24605460, - 'sell_reason': None, 'exit_reason': None, 'exit_order_status': None, 'strategy': CURRENT_TEST_STRATEGY, - 'buy_tag': None, 'enter_tag': None, 'timeframe': 5, 'exchange': 'binance', From cd4faa9c59b710c34a6a3f78e1dec161e4a2a3bb Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 27 Dec 2022 18:08:20 +0100 Subject: [PATCH 417/421] keep max_stake_amount through backtests --- freqtrade/data/btanalysis.py | 50 +++++++++++-------- freqtrade/persistence/trade_model.py | 4 +- tests/optimize/test_backtesting.py | 1 + .../test_backtesting_adjust_position.py | 1 + .../backtest_results/backtest-result.json | 2 +- 5 files changed, 36 insertions(+), 22 deletions(-) diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py index 6350aca55..3102683b2 100644 --- a/freqtrade/data/btanalysis.py +++ b/freqtrade/data/btanalysis.py @@ -20,8 +20,8 @@ from freqtrade.persistence import LocalTrade, Trade, init_db logger = logging.getLogger(__name__) # Newest format -BT_DATA_COLUMNS = ['pair', 'stake_amount', 'amount', 'open_date', 'close_date', - 'open_rate', 'close_rate', +BT_DATA_COLUMNS = ['pair', 'stake_amount', 'max_stake_amount', 'amount', + 'open_date', 'close_date', 'open_rate', 'close_rate', 'fee_open', 'fee_close', 'trade_duration', 'profit_ratio', 'profit_abs', 'exit_reason', 'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs', @@ -241,6 +241,33 @@ def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, s return results +def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame: + """ + Compatibility support for older backtest data. + """ + df['open_date'] = pd.to_datetime(df['open_date'], + utc=True, + infer_datetime_format=True + ) + df['close_date'] = pd.to_datetime(df['close_date'], + utc=True, + infer_datetime_format=True + ) + # Compatibility support for pre short Columns + if 'is_short' not in df.columns: + df['is_short'] = False + if 'leverage' not in df.columns: + df['leverage'] = 1.0 + if 'enter_tag' not in df.columns: + df['enter_tag'] = df['buy_tag'] + df = df.drop(['buy_tag'], axis=1) + if 'max_stake_amount' not in df.columns: + df['max_stake_amount'] = df['stake_amount'] + if 'orders' not in df.columns: + df['orders'] = None + return df + + def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = None) -> pd.DataFrame: """ Load backtest data file. @@ -269,24 +296,7 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non data = data['strategy'][strategy]['trades'] df = pd.DataFrame(data) if not df.empty: - df['open_date'] = pd.to_datetime(df['open_date'], - utc=True, - infer_datetime_format=True - ) - df['close_date'] = pd.to_datetime(df['close_date'], - utc=True, - infer_datetime_format=True - ) - # Compatibility support for pre short Columns - if 'is_short' not in df.columns: - df['is_short'] = False - if 'leverage' not in df.columns: - df['leverage'] = 1.0 - if 'enter_tag' not in df.columns: - df['enter_tag'] = df['buy_tag'] - df = df.drop(['buy_tag'], axis=1) - if 'orders' not in df.columns: - df['orders'] = None + df = _load_backtest_data_df_compatibility(df) else: # old format - only with lists. 
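A minimal sketch, assuming a hand-built DataFrame in place of a real backtest result, of the backfill that _load_backtest_data_df_compatibility applies to older results loaded through load_backtest_data; the column names follow the hunk above, while the sample trade values are invented for illustration.

import pandas as pd

# Old-format trade list: no is_short/leverage/max_stake_amount columns and the
# entry tag still lives under the deprecated 'buy_tag' name.
df = pd.DataFrame({
    'pair': ['ETH/BTC'],
    'stake_amount': [0.001],
    'open_date': ['2018-01-10 07:15:00+00:00'],
    'close_date': ['2018-01-10 07:20:00+00:00'],
    'buy_tag': ['buy_signal_001'],
})

df['open_date'] = pd.to_datetime(df['open_date'], utc=True)
df['close_date'] = pd.to_datetime(df['close_date'], utc=True)
if 'is_short' not in df.columns:
    df['is_short'] = False
if 'leverage' not in df.columns:
    df['leverage'] = 1.0
if 'enter_tag' not in df.columns:
    # Rename the deprecated column to its current name.
    df['enter_tag'] = df['buy_tag']
    df = df.drop(['buy_tag'], axis=1)
if 'max_stake_amount' not in df.columns:
    # Older results never tracked the DCA peak, so stake_amount is the fallback.
    df['max_stake_amount'] = df['stake_amount']
if 'orders' not in df.columns:
    df['orders'] = None

# For pre-existing (non-DCA) results the two columns end up identical.
assert (df['max_stake_amount'] == df['stake_amount']).all()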
diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py index e954fd263..0c36d2378 100644 --- a/freqtrade/persistence/trade_model.py +++ b/freqtrade/persistence/trade_model.py @@ -876,6 +876,7 @@ class LocalTrade(): ZERO = FtPrecise(0.0) current_amount = FtPrecise(0.0) current_stake = FtPrecise(0.0) + max_stake_amount = FtPrecise(0.0) total_stake = 0.0 # Total stake after all buy orders (does not subtract!) avg_price = FtPrecise(0.0) close_profit = 0.0 @@ -917,8 +918,9 @@ class LocalTrade(): exit_rate, amount=exit_amount, open_rate=avg_price) else: total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price) + max_stake_amount += (tmp_amount * price) self.funding_fees = funding_fees - self.max_stake_amount = total_stake + self.max_stake_amount = float(max_stake_amount) if close_profit: self.close_profit = close_profit diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index ad6242b0e..fc14a0f88 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -710,6 +710,7 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None: expected = pd.DataFrame( {'pair': [pair, pair], 'stake_amount': [0.001, 0.001], + 'max_stake_amount': [0.001, 0.001], 'amount': [0.00957442, 0.0097064], 'open_date': pd.to_datetime([Arrow(2018, 1, 29, 18, 40, 0).datetime, Arrow(2018, 1, 30, 3, 30, 0).datetime], utc=True diff --git a/tests/optimize/test_backtesting_adjust_position.py b/tests/optimize/test_backtesting_adjust_position.py index b97b45e26..5c740458f 100644 --- a/tests/optimize/test_backtesting_adjust_position.py +++ b/tests/optimize/test_backtesting_adjust_position.py @@ -50,6 +50,7 @@ def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) -> expected = pd.DataFrame( {'pair': [pair, pair], 'stake_amount': [500.0, 100.0], + 'max_stake_amount': [500.0, 100], 'amount': [4806.87657523, 970.63960782], 'open_date': pd.to_datetime([Arrow(2018, 1, 29, 18, 40, 0).datetime, Arrow(2018, 1, 30, 3, 30, 0).datetime], utc=True diff --git a/tests/testdata/backtest_results/backtest-result.json b/tests/testdata/backtest_results/backtest-result.json index f16f95c33..96440fdf5 100644 --- a/tests/testdata/backtest_results/backtest-result.json +++ b/tests/testdata/backtest_results/backtest-result.json @@ -1 +1 @@ -{"metadata":{"StrategyTestV3":{"run_id":"asdf","backtest_start_time":"2020-10-01 18:00:00+00:00"}},"strategy":{"StrategyTestV3":{"trades":[{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.37344398340249,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 07:20:00+00:00","open_rate":9.64e-05,"close_rate":0.00010074887218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":8.676e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.676e-05,"stop_loss_ratio":0.1,"min_rate":9.64e-05,"max_rate":0.00010074887218045112,"is_open":false,"open_timestamp":1515568500000.0,"close_timestamp":1515568800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":21.026072329688816,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 
07:30:00+00:00","open_rate":4.756e-05,"close_rate":4.9705563909774425e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":4.2804e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2804e-05,"stop_loss_ratio":0.1,"min_rate":4.756e-05,"max_rate":4.9705563909774425e-05,"is_open":false,"open_timestamp":1515568500000.0,"close_timestamp":1515569400000.0,"is_short":false,"leverage":1.0,"enter_tag":"buy_tag","orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":29.94908655286014,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:35:00+00:00","open_rate":3.339e-05,"close_rate":3.489631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":3.0050999999999997e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0050999999999997e-05,"stop_loss_ratio":0.1,"min_rate":3.339e-05,"max_rate":3.489631578947368e-05,"is_open":false,"open_timestamp":1515569100000.0,"close_timestamp":1515569700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.313531353135314,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:40:00+00:00","open_rate":9.696e-05,"close_rate":0.00010133413533834584,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":8.7264e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.7264e-05,"stop_loss_ratio":0.1,"min_rate":9.696e-05,"max_rate":0.00010133413533834584,"is_open":false,"open_timestamp":1515569100000.0,"close_timestamp":1515570000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010604453870625663,"open_date":"2018-01-10 07:35:00+00:00","close_date":"2018-01-10 08:35:00+00:00","open_rate":0.0943,"close_rate":0.09477268170426063,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08487,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08487,"stop_loss_ratio":0.1,"min_rate":0.0943,"max_rate":0.09477268170426063,"is_open":false,"open_timestamp":1515569700000.0,"close_timestamp":1515573300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03677001860930642,"open_date":"2018-01-10 07:40:00+00:00","close_date":"2018-01-10 08:10:00+00:00","open_rate":0.02719607,"close_rate":0.02760503345864661,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.024476463,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024476463,"stop_loss_ratio":0.1,"min_rate":0.02719607,"max_rate":0.02760503345864661,"is_open":false,"open_timestamp":1515570000000.0,"close_timestamp":1515571800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021575196463739,"open_date":"2018-01-10 08:15:00+00:00","close_date":"2018-01-10 
09:55:00+00:00","open_rate":0.04634952,"close_rate":0.046581848421052625,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":100,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.041714568,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041714568,"stop_loss_ratio":0.1,"min_rate":0.04634952,"max_rate":0.046581848421052625,"is_open":false,"open_timestamp":1515572100000.0,"close_timestamp":1515578100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":32.615786040443574,"open_date":"2018-01-10 14:45:00+00:00","close_date":"2018-01-10 15:50:00+00:00","open_rate":3.066e-05,"close_rate":3.081368421052631e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7594e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7594e-05,"stop_loss_ratio":0.1,"min_rate":3.066e-05,"max_rate":3.081368421052631e-05,"is_open":false,"open_timestamp":1515595500000.0,"close_timestamp":1515599400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.05917194776300452,"open_date":"2018-01-10 16:35:00+00:00","close_date":"2018-01-10 17:15:00+00:00","open_rate":0.0168999,"close_rate":0.016984611278195488,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.01520991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01520991,"stop_loss_ratio":0.1,"min_rate":0.0168999,"max_rate":0.016984611278195488,"is_open":false,"open_timestamp":1515602100000.0,"close_timestamp":1515604500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010949822656672253,"open_date":"2018-01-10 16:40:00+00:00","close_date":"2018-01-10 17:20:00+00:00","open_rate":0.09132568,"close_rate":0.0917834528320802,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08219311200000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08219311200000001,"stop_loss_ratio":0.1,"min_rate":0.09132568,"max_rate":0.0917834528320802,"is_open":false,"open_timestamp":1515602400000.0,"close_timestamp":1515604800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011238476768326556,"open_date":"2018-01-10 18:50:00+00:00","close_date":"2018-01-10 19:45:00+00:00","open_rate":0.08898003,"close_rate":0.08942604518796991,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.080082027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080082027,"stop_loss_ratio":0.1,"min_rate":0.08898003,"max_rate":0.08942604518796991,"is_open":false,"open_timestamp":1515610200000.0,"close_timestamp":1515613500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011682232072680309,"open_date":"2018-01-10 22:15:00+00:00","close_date":"2018-01-10 
23:00:00+00:00","open_rate":0.08560008,"close_rate":0.08602915308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.077040072,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077040072,"stop_loss_ratio":0.1,"min_rate":0.08560008,"max_rate":0.08602915308270676,"is_open":false,"open_timestamp":1515622500000.0,"close_timestamp":1515625200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4014726015023105,"open_date":"2018-01-10 22:50:00+00:00","close_date":"2018-01-10 23:20:00+00:00","open_rate":0.00249083,"close_rate":0.0025282860902255634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002241747,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002241747,"stop_loss_ratio":0.1,"min_rate":0.00249083,"max_rate":0.0025282860902255634,"is_open":false,"open_timestamp":1515624600000.0,"close_timestamp":1515626400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":33.090668431502316,"open_date":"2018-01-10 23:15:00+00:00","close_date":"2018-01-11 00:15:00+00:00","open_rate":3.022e-05,"close_rate":3.037147869674185e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7198e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7198e-05,"stop_loss_ratio":0.1,"min_rate":3.022e-05,"max_rate":3.037147869674185e-05,"is_open":false,"open_timestamp":1515626100000.0,"close_timestamp":1515629700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.41034058268362744,"open_date":"2018-01-10 23:40:00+00:00","close_date":"2018-01-11 00:05:00+00:00","open_rate":0.002437,"close_rate":0.0024980776942355883,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.0021933,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0021933,"stop_loss_ratio":0.1,"min_rate":0.002437,"max_rate":0.0024980776942355883,"is_open":false,"open_timestamp":1515627600000.0,"close_timestamp":1515629100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02095643931654345,"open_date":"2018-01-11 00:00:00+00:00","close_date":"2018-01-11 00:35:00+00:00","open_rate":0.04771803,"close_rate":0.04843559436090225,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.042946227,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.042946227,"stop_loss_ratio":0.1,"min_rate":0.04771803,"max_rate":0.04843559436090225,"is_open":false,"open_timestamp":1515628800000.0,"close_timestamp":1515630900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":27.389756231169542,"open_date":"2018-01-11 03:40:00+00:00","close_date":"2018-01-11 
04:25:00+00:00","open_rate":3.651e-05,"close_rate":3.2859000000000005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.10448878,"profit_abs":-9.999999999999994e-05,"exit_reason":"stop_loss","initial_stop_loss_abs":3.2859000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2859000000000005e-05,"stop_loss_ratio":0.1,"min_rate":3.2859000000000005e-05,"max_rate":3.651e-05,"is_open":false,"open_timestamp":1515642000000.0,"close_timestamp":1515644700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011332594070446804,"open_date":"2018-01-11 03:55:00+00:00","close_date":"2018-01-11 04:25:00+00:00","open_rate":0.08824105,"close_rate":0.08956798308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.079416945,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079416945,"stop_loss_ratio":0.1,"min_rate":0.08824105,"max_rate":0.08956798308270676,"is_open":false,"open_timestamp":1515642900000.0,"close_timestamp":1515644700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.411522633744856,"open_date":"2018-01-11 04:00:00+00:00","close_date":"2018-01-11 04:50:00+00:00","open_rate":0.00243,"close_rate":0.002442180451127819,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002187,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002187,"stop_loss_ratio":0.1,"min_rate":0.00243,"max_rate":0.002442180451127819,"is_open":false,"open_timestamp":1515643200000.0,"close_timestamp":1515646200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022001890402423376,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 04:55:00+00:00","open_rate":0.04545064,"close_rate":0.046589753784461146,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.040905576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040905576,"stop_loss_ratio":0.1,"min_rate":0.04545064,"max_rate":0.046589753784461146,"is_open":false,"open_timestamp":1515645000000.0,"close_timestamp":1515646500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":29.655990510083036,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 04:50:00+00:00","open_rate":3.372e-05,"close_rate":3.456511278195488e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":3.0348e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0348e-05,"stop_loss_ratio":0.1,"min_rate":3.372e-05,"max_rate":3.456511278195488e-05,"is_open":false,"open_timestamp":1515645000000.0,"close_timestamp":1515646200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.037821482602118005,"open_date":"2018-01-11 04:55:00+00:00","close_date":"2018-01-11 
05:15:00+00:00","open_rate":0.02644,"close_rate":0.02710265664160401,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.023796,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023796,"stop_loss_ratio":0.1,"min_rate":0.02644,"max_rate":0.02710265664160401,"is_open":false,"open_timestamp":1515646500000.0,"close_timestamp":1515647700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011348161597821153,"open_date":"2018-01-11 11:20:00+00:00","close_date":"2018-01-11 12:00:00+00:00","open_rate":0.08812,"close_rate":0.08856170426065162,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.079308,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079308,"stop_loss_ratio":0.1,"min_rate":0.08812,"max_rate":0.08856170426065162,"is_open":false,"open_timestamp":1515669600000.0,"close_timestamp":1515672000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.037263696923919086,"open_date":"2018-01-11 11:35:00+00:00","close_date":"2018-01-11 12:15:00+00:00","open_rate":0.02683577,"close_rate":0.026970285137844607,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.024152193,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024152193,"stop_loss_ratio":0.1,"min_rate":0.02683577,"max_rate":0.026970285137844607,"is_open":false,"open_timestamp":1515670500000.0,"close_timestamp":1515672900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.329335230737954,"open_date":"2018-01-11 14:00:00+00:00","close_date":"2018-01-11 14:25:00+00:00","open_rate":4.919e-05,"close_rate":5.04228320802005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.4271e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4271e-05,"stop_loss_ratio":0.1,"min_rate":4.919e-05,"max_rate":5.04228320802005e-05,"is_open":false,"open_timestamp":1515679200000.0,"close_timestamp":1515680700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.01138317402960718,"open_date":"2018-01-11 19:25:00+00:00","close_date":"2018-01-11 20:35:00+00:00","open_rate":0.08784896,"close_rate":0.08828930566416039,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":70,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.079064064,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079064064,"stop_loss_ratio":0.1,"min_rate":0.08784896,"max_rate":0.08828930566416039,"is_open":false,"open_timestamp":1515698700000.0,"close_timestamp":1515702900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.58863858961802,"open_date":"2018-01-11 22:35:00+00:00","close_date":"2018-01-11 
23:30:00+00:00","open_rate":5.105e-05,"close_rate":5.130588972431077e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.5945e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.5945e-05,"stop_loss_ratio":0.1,"min_rate":5.105e-05,"max_rate":5.130588972431077e-05,"is_open":false,"open_timestamp":1515710100000.0,"close_timestamp":1515713400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.252525252525253,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:25:00+00:00","open_rate":3.96e-05,"close_rate":4.019548872180451e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":3.5640000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5640000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.96e-05,"max_rate":4.019548872180451e-05,"is_open":false,"open_timestamp":1515711300000.0,"close_timestamp":1515713100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":34.66204506065858,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:35:00+00:00","open_rate":2.885e-05,"close_rate":2.899461152882205e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.5965e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.5965e-05,"stop_loss_ratio":0.1,"min_rate":2.885e-05,"max_rate":2.899461152882205e-05,"is_open":false,"open_timestamp":1515711300000.0,"close_timestamp":1515713700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03780718336483932,"open_date":"2018-01-11 23:30:00+00:00","close_date":"2018-01-12 00:05:00+00:00","open_rate":0.02645,"close_rate":0.026847744360902256,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.023805000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023805000000000003,"stop_loss_ratio":0.1,"min_rate":0.02645,"max_rate":0.026847744360902256,"is_open":false,"open_timestamp":1515713400000.0,"close_timestamp":1515715500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.020833333333333332,"open_date":"2018-01-11 23:55:00+00:00","close_date":"2018-01-12 01:15:00+00:00","open_rate":0.048,"close_rate":0.04824060150375939,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0432,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0432,"stop_loss_ratio":0.1,"min_rate":0.048,"max_rate":0.04824060150375939,"is_open":false,"open_timestamp":1515714900000.0,"close_timestamp":1515719700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":21.31287297527707,"open_date":"2018-01-12 21:15:00+00:00","close_date":"2018-01-12 
21:40:00+00:00","open_rate":4.692e-05,"close_rate":4.809593984962405e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.2228e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2228e-05,"stop_loss_ratio":0.1,"min_rate":4.692e-05,"max_rate":4.809593984962405e-05,"is_open":false,"open_timestamp":1515791700000.0,"close_timestamp":1515793200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38915654211062944,"open_date":"2018-01-13 00:55:00+00:00","close_date":"2018-01-13 06:20:00+00:00","open_rate":0.00256966,"close_rate":0.0025825405012531327,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":325,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002312694,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002312694,"stop_loss_ratio":0.1,"min_rate":0.00256966,"max_rate":0.0025825405012531327,"is_open":false,"open_timestamp":1515804900000.0,"close_timestamp":1515824400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":15.96933886937081,"open_date":"2018-01-13 10:55:00+00:00","close_date":"2018-01-13 11:35:00+00:00","open_rate":6.262e-05,"close_rate":6.293388471177944e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.6358e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.6358e-05,"stop_loss_ratio":0.1,"min_rate":6.262e-05,"max_rate":6.293388471177944e-05,"is_open":false,"open_timestamp":1515840900000.0,"close_timestamp":1515843300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":21.14164904862579,"open_date":"2018-01-13 13:05:00+00:00","close_date":"2018-01-15 14:10:00+00:00","open_rate":4.73e-05,"close_rate":4.753709273182957e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":2945,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.257e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.257e-05,"stop_loss_ratio":0.1,"min_rate":4.73e-05,"max_rate":4.753709273182957e-05,"is_open":false,"open_timestamp":1515848700000.0,"close_timestamp":1516025400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.49348507339601,"open_date":"2018-01-13 13:30:00+00:00","close_date":"2018-01-13 14:45:00+00:00","open_rate":6.063e-05,"close_rate":6.0933909774436085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.4567e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4567e-05,"stop_loss_ratio":0.1,"min_rate":6.063e-05,"max_rate":6.0933909774436085e-05,"is_open":false,"open_timestamp":1515850200000.0,"close_timestamp":1515854700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":9.023641941887746,"open_date":"2018-01-13 13:40:00+00:00","close_date":"2018-01-13 
23:30:00+00:00","open_rate":0.00011082,"close_rate":0.00011137548872180448,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":590,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":9.9738e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.9738e-05,"stop_loss_ratio":0.1,"min_rate":0.00011082,"max_rate":0.00011137548872180448,"is_open":false,"open_timestamp":1515850800000.0,"close_timestamp":1515886200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.863406408094438,"open_date":"2018-01-13 15:15:00+00:00","close_date":"2018-01-13 15:55:00+00:00","open_rate":5.93e-05,"close_rate":5.9597243107769415e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.337e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.337e-05,"stop_loss_ratio":0.1,"min_rate":5.93e-05,"max_rate":5.9597243107769415e-05,"is_open":false,"open_timestamp":1515856500000.0,"close_timestamp":1515858900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.020618543947292404,"open_date":"2018-01-13 16:30:00+00:00","close_date":"2018-01-13 17:10:00+00:00","open_rate":0.04850003,"close_rate":0.04874313791979949,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.043650027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.043650027,"stop_loss_ratio":0.1,"min_rate":0.04850003,"max_rate":0.04874313791979949,"is_open":false,"open_timestamp":1515861000000.0,"close_timestamp":1515863400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010178097365511457,"open_date":"2018-01-13 22:05:00+00:00","close_date":"2018-01-14 06:25:00+00:00","open_rate":0.09825019,"close_rate":0.09874267215538848,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":500,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.088425171,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.088425171,"stop_loss_ratio":0.1,"min_rate":0.09825019,"max_rate":0.09874267215538848,"is_open":false,"open_timestamp":1515881100000.0,"close_timestamp":1515911100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":16.616816218012627,"open_date":"2018-01-14 00:20:00+00:00","close_date":"2018-01-14 22:55:00+00:00","open_rate":6.018e-05,"close_rate":6.048165413533834e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":1355,"profit_ratio":0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.4162e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4162e-05,"stop_loss_ratio":0.1,"min_rate":6.018e-05,"max_rate":6.048165413533834e-05,"is_open":false,"open_timestamp":1515889200000.0,"close_timestamp":1515970500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010246952581919518,"open_date":"2018-01-14 12:45:00+00:00","close_date":"2018-01-14 
13:25:00+00:00","open_rate":0.09758999,"close_rate":0.0980791628822055,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.087830991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.087830991,"stop_loss_ratio":0.1,"min_rate":0.09758999,"max_rate":0.0980791628822055,"is_open":false,"open_timestamp":1515933900000.0,"close_timestamp":1515936300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3215434083601286,"open_date":"2018-01-14 15:30:00+00:00","close_date":"2018-01-14 16:00:00+00:00","open_rate":0.00311,"close_rate":0.0031567669172932328,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002799,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002799,"stop_loss_ratio":0.1,"min_rate":0.00311,"max_rate":0.0031567669172932328,"is_open":false,"open_timestamp":1515943800000.0,"close_timestamp":1515945600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.32010140812609433,"open_date":"2018-01-14 20:45:00+00:00","close_date":"2018-01-14 22:15:00+00:00","open_rate":0.00312401,"close_rate":0.003139669197994987,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002811609,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002811609,"stop_loss_ratio":0.1,"min_rate":0.00312401,"max_rate":0.003139669197994987,"is_open":false,"open_timestamp":1515962700000.0,"close_timestamp":1515968100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.057247866085791646,"open_date":"2018-01-14 23:35:00+00:00","close_date":"2018-01-15 00:30:00+00:00","open_rate":0.0174679,"close_rate":0.017555458395989976,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.015721110000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.015721110000000003,"stop_loss_ratio":0.1,"min_rate":0.0174679,"max_rate":0.017555458395989976,"is_open":false,"open_timestamp":1515972900000.0,"close_timestamp":1515976200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.013611282991367995,"open_date":"2018-01-14 23:45:00+00:00","close_date":"2018-01-15 00:25:00+00:00","open_rate":0.07346846,"close_rate":0.07383672295739348,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.066121614,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.066121614,"stop_loss_ratio":0.1,"min_rate":0.07346846,"max_rate":0.07383672295739348,"is_open":false,"open_timestamp":1515973500000.0,"close_timestamp":1515975900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010204706410596568,"open_date":"2018-01-15 02:25:00+00:00","close_date":"2018-01-15 
03:05:00+00:00","open_rate":0.097994,"close_rate":0.09848519799498744,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0881946,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0881946,"stop_loss_ratio":0.1,"min_rate":0.097994,"max_rate":0.09848519799498744,"is_open":false,"open_timestamp":1515983100000.0,"close_timestamp":1515985500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010353038616834042,"open_date":"2018-01-15 07:20:00+00:00","close_date":"2018-01-15 08:00:00+00:00","open_rate":0.09659,"close_rate":0.09707416040100247,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.086931,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.086931,"stop_loss_ratio":0.1,"min_rate":0.09659,"max_rate":0.09707416040100247,"is_open":false,"open_timestamp":1516000800000.0,"close_timestamp":1516003200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.0130169219986,"open_date":"2018-01-15 08:20:00+00:00","close_date":"2018-01-15 08:55:00+00:00","open_rate":9.987e-05,"close_rate":0.00010137180451127818,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":8.9883e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.9883e-05,"stop_loss_ratio":0.1,"min_rate":9.987e-05,"max_rate":0.00010137180451127818,"is_open":false,"open_timestamp":1516004400000.0,"close_timestamp":1516006500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010537752023511832,"open_date":"2018-01-15 12:10:00+00:00","close_date":"2018-01-16 02:50:00+00:00","open_rate":0.0948969,"close_rate":0.09537257368421052,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":880,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08540721000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08540721000000001,"stop_loss_ratio":0.1,"min_rate":0.0948969,"max_rate":0.09537257368421052,"is_open":false,"open_timestamp":1516018200000.0,"close_timestamp":1516071000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014084507042253523,"open_date":"2018-01-15 14:10:00+00:00","close_date":"2018-01-15 17:40:00+00:00","open_rate":0.071,"close_rate":0.07135588972431077,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":210,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0639,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0639,"stop_loss_ratio":0.1,"min_rate":0.071,"max_rate":0.07135588972431077,"is_open":false,"open_timestamp":1516025400000.0,"close_timestamp":1516038000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021736763017766975,"open_date":"2018-01-15 14:30:00+00:00","close_date":"2018-01-15 
15:10:00+00:00","open_rate":0.04600501,"close_rate":0.046235611553884705,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.041404509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041404509,"stop_loss_ratio":0.1,"min_rate":0.04600501,"max_rate":0.046235611553884705,"is_open":false,"open_timestamp":1516026600000.0,"close_timestamp":1516029000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.595465140919686,"open_date":"2018-01-15 18:10:00+00:00","close_date":"2018-01-15 19:25:00+00:00","open_rate":9.438e-05,"close_rate":9.485308270676693e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":8.4942e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.4942e-05,"stop_loss_ratio":0.1,"min_rate":9.438e-05,"max_rate":9.485308270676693e-05,"is_open":false,"open_timestamp":1516039800000.0,"close_timestamp":1516044300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.032894726021471705,"open_date":"2018-01-15 18:35:00+00:00","close_date":"2018-01-15 19:15:00+00:00","open_rate":0.03040001,"close_rate":0.030552391002506264,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.027360009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027360009,"stop_loss_ratio":0.1,"min_rate":0.03040001,"max_rate":0.030552391002506264,"is_open":false,"open_timestamp":1516041300000.0,"close_timestamp":1516043700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.13208840157615,"open_date":"2018-01-15 20:25:00+00:00","close_date":"2018-01-16 08:25:00+00:00","open_rate":5.837e-05,"close_rate":5.2533e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":720,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":5.2533e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2533e-05,"stop_loss_ratio":0.1,"min_rate":5.2533e-05,"max_rate":5.837e-05,"is_open":false,"open_timestamp":1516047900000.0,"close_timestamp":1516091100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.021722130506560085,"open_date":"2018-01-15 20:40:00+00:00","close_date":"2018-01-15 22:00:00+00:00","open_rate":0.046036,"close_rate":0.04626675689223057,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0414324,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0414324,"stop_loss_ratio":0.1,"min_rate":0.046036,"max_rate":0.04626675689223057,"is_open":false,"open_timestamp":1516048800000.0,"close_timestamp":1516053600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.34861425832316545,"open_date":"2018-01-16 00:30:00+00:00","close_date":"2018-01-16 
01:10:00+00:00","open_rate":0.0028685,"close_rate":0.0028828784461152877,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.00258165,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00258165,"stop_loss_ratio":0.1,"min_rate":0.0028685,"max_rate":0.0028828784461152877,"is_open":false,"open_timestamp":1516062600000.0,"close_timestamp":1516065000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014854967241083492,"open_date":"2018-01-16 01:15:00+00:00","close_date":"2018-01-16 02:35:00+00:00","open_rate":0.06731755,"close_rate":0.0676549813283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060585795000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060585795000000005,"stop_loss_ratio":0.1,"min_rate":0.06731755,"max_rate":0.0676549813283208,"is_open":false,"open_timestamp":1516065300000.0,"close_timestamp":1516070100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010848794492804754,"open_date":"2018-01-16 07:45:00+00:00","close_date":"2018-01-16 08:40:00+00:00","open_rate":0.09217614,"close_rate":0.09263817578947368,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.082958526,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082958526,"stop_loss_ratio":0.1,"min_rate":0.09217614,"max_rate":0.09263817578947368,"is_open":false,"open_timestamp":1516088700000.0,"close_timestamp":1516092000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06060606060606061,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 08:55:00+00:00","open_rate":0.0165,"close_rate":0.016913533834586467,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":0.01485,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01485,"stop_loss_ratio":0.1,"min_rate":0.0165,"max_rate":0.016913533834586467,"is_open":false,"open_timestamp":1516091700000.0,"close_timestamp":1516092900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":12.57387149503332,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 08:40:00+00:00","open_rate":7.953e-05,"close_rate":8.311781954887218e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":7.157700000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.157700000000001e-05,"stop_loss_ratio":0.1,"min_rate":7.953e-05,"max_rate":8.311781954887218e-05,"is_open":false,"open_timestamp":1516091700000.0,"close_timestamp":1516092000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022122914915269236,"open_date":"2018-01-16 08:45:00+00:00","close_date":"2018-01-16 
09:50:00+00:00","open_rate":0.045202,"close_rate":0.04542857644110275,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0406818,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0406818,"stop_loss_ratio":0.1,"min_rate":0.045202,"max_rate":0.04542857644110275,"is_open":false,"open_timestamp":1516092300000.0,"close_timestamp":1516096200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.054878048780488,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:45:00+00:00","open_rate":5.248e-05,"close_rate":5.326917293233082e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":4.7232e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7232e-05,"stop_loss_ratio":0.1,"min_rate":5.248e-05,"max_rate":5.326917293233082e-05,"is_open":false,"open_timestamp":1516094100000.0,"close_timestamp":1516095900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03457434486802627,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:55:00+00:00","open_rate":0.02892318,"close_rate":0.02906815834586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.026030862,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.026030862,"stop_loss_ratio":0.1,"min_rate":0.02892318,"max_rate":0.02906815834586466,"is_open":false,"open_timestamp":1516094100000.0,"close_timestamp":1516096500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.38735944164405,"open_date":"2018-01-16 09:50:00+00:00","close_date":"2018-01-16 10:10:00+00:00","open_rate":5.158e-05,"close_rate":5.287273182957392e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.6422e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6422e-05,"stop_loss_ratio":0.1,"min_rate":5.158e-05,"max_rate":5.287273182957392e-05,"is_open":false,"open_timestamp":1516096200000.0,"close_timestamp":1516097400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022948496230938985,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 10:40:00+00:00","open_rate":0.04357584,"close_rate":0.044231115789473675,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.039218256,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039218256,"stop_loss_ratio":0.1,"min_rate":0.04357584,"max_rate":0.044231115789473675,"is_open":false,"open_timestamp":1516097100000.0,"close_timestamp":1516099200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.035357778286929785,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 
10:35:00+00:00","open_rate":0.02828232,"close_rate":0.02870761804511278,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.025454088,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025454088,"stop_loss_ratio":0.1,"min_rate":0.02828232,"max_rate":0.02870761804511278,"is_open":false,"open_timestamp":1516097100000.0,"close_timestamp":1516098900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.64975755315181,"open_date":"2018-01-16 13:45:00+00:00","close_date":"2018-01-16 14:20:00+00:00","open_rate":5.362e-05,"close_rate":5.442631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.8258e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8258e-05,"stop_loss_ratio":0.1,"min_rate":5.362e-05,"max_rate":5.442631578947368e-05,"is_open":false,"open_timestamp":1516110300000.0,"close_timestamp":1516112400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.86080724254998,"open_date":"2018-01-16 17:30:00+00:00","close_date":"2018-01-16 18:25:00+00:00","open_rate":5.302e-05,"close_rate":5.328576441102756e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7718e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7718e-05,"stop_loss_ratio":0.1,"min_rate":5.302e-05,"max_rate":5.328576441102756e-05,"is_open":false,"open_timestamp":1516123800000.0,"close_timestamp":1516127100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010952903718828448,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 18:45:00+00:00","open_rate":0.09129999,"close_rate":0.09267292218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.082169991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082169991,"stop_loss_ratio":0.1,"min_rate":0.09129999,"max_rate":0.09267292218045112,"is_open":false,"open_timestamp":1516126500000.0,"close_timestamp":1516128300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":26.26050420168067,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 18:35:00+00:00","open_rate":3.808e-05,"close_rate":3.903438596491228e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":3.4272e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.4272e-05,"stop_loss_ratio":0.1,"min_rate":3.808e-05,"max_rate":3.903438596491228e-05,"is_open":false,"open_timestamp":1516126500000.0,"close_timestamp":1516127700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.035574376772493324,"open_date":"2018-01-16 19:00:00+00:00","close_date":"2018-01-16 
19:30:00+00:00","open_rate":0.02811012,"close_rate":0.028532828571428567,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.025299108,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025299108,"stop_loss_ratio":0.1,"min_rate":0.02811012,"max_rate":0.028532828571428567,"is_open":false,"open_timestamp":1516129200000.0,"close_timestamp":1516131000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.387028357567759,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":0.00258379,"close_rate":0.002325411,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":0.002325411,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002325411,"stop_loss_ratio":0.1,"min_rate":0.002325411,"max_rate":0.00258379,"is_open":false,"open_timestamp":1516137900000.0,"close_timestamp":1516141500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":39.07776475185619,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":2.559e-05,"close_rate":2.3031e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":2.3031e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.3031e-05,"stop_loss_ratio":0.1,"min_rate":2.3031e-05,"max_rate":2.559e-05,"is_open":false,"open_timestamp":1516137900000.0,"close_timestamp":1516142700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":13.123359580052494,"open_date":"2018-01-16 21:35:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":7.62e-05,"close_rate":6.858e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":6.858e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.858e-05,"stop_loss_ratio":0.1,"min_rate":6.858e-05,"max_rate":7.62e-05,"is_open":false,"open_timestamp":1516138500000.0,"close_timestamp":1516141500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06622516556291391,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 22:40:00+00:00","open_rate":0.0151,"close_rate":0.015781203007518795,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":0.01359,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01359,"stop_loss_ratio":0.1,"min_rate":0.0151,"max_rate":0.015781203007518795,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4350777048780912,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 
22:35:00+00:00","open_rate":0.00229844,"close_rate":0.002402129022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488727e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002068596,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002068596,"stop_loss_ratio":0.1,"min_rate":0.00229844,"max_rate":0.002402129022556391,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.4243113426908128,"open_date":"2018-01-16 22:40:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":0.00235676,"close_rate":0.00246308,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488727e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002121084,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002121084,"stop_loss_ratio":0.1,"min_rate":0.00235676,"max_rate":0.00246308,"is_open":false,"open_timestamp":1516142400000.0,"close_timestamp":1516142700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01585559988076589,"open_date":"2018-01-16 22:45:00+00:00","close_date":"2018-01-16 23:05:00+00:00","open_rate":0.0630692,"close_rate":0.06464988170426066,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":0.056762280000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.056762280000000005,"stop_loss_ratio":0.1,"min_rate":0.0630692,"max_rate":0.06464988170426066,"is_open":false,"open_timestamp":1516142700000.0,"close_timestamp":1516143900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":45.45454545454545,"open_date":"2018-01-16 22:50:00+00:00","close_date":"2018-01-16 22:55:00+00:00","open_rate":2.2e-05,"close_rate":2.299248120300751e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488684e-05,"exit_reason":"roi","initial_stop_loss_abs":1.98e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":1.98e-05,"stop_loss_ratio":0.1,"min_rate":2.2e-05,"max_rate":2.299248120300751e-05,"is_open":false,"open_timestamp":1516143000000.0,"close_timestamp":1516143300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.10454362685967,"open_date":"2018-01-17 03:30:00+00:00","close_date":"2018-01-17 04:00:00+00:00","open_rate":4.974e-05,"close_rate":5.048796992481203e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.4766000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4766000000000005e-05,"stop_loss_ratio":0.1,"min_rate":4.974e-05,"max_rate":5.048796992481203e-05,"is_open":false,"open_timestamp":1516159800000.0,"close_timestamp":1516161600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":14.068655036578503,"open_date":"2018-01-17 03:55:00+00:00","close_date":"2018-01-17 
04:15:00+00:00","open_rate":7.108e-05,"close_rate":7.28614536340852e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":6.3972e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.3972e-05,"stop_loss_ratio":0.1,"min_rate":7.108e-05,"max_rate":7.28614536340852e-05,"is_open":false,"open_timestamp":1516161300000.0,"close_timestamp":1516162500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.0231107002542177,"open_date":"2018-01-17 09:35:00+00:00","close_date":"2018-01-17 10:15:00+00:00","open_rate":0.04327,"close_rate":0.04348689223057644,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.038943000000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.038943000000000005,"stop_loss_ratio":0.1,"min_rate":0.04327,"max_rate":0.04348689223057644,"is_open":false,"open_timestamp":1516181700000.0,"close_timestamp":1516184100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":20.012007204322593,"open_date":"2018-01-17 10:20:00+00:00","close_date":"2018-01-17 17:00:00+00:00","open_rate":4.997e-05,"close_rate":5.022047619047618e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":400,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.4973e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4973e-05,"stop_loss_ratio":0.1,"min_rate":4.997e-05,"max_rate":5.022047619047618e-05,"is_open":false,"open_timestamp":1516184400000.0,"close_timestamp":1516208400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014626687444363738,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 11:25:00+00:00","open_rate":0.06836818,"close_rate":0.06871087764411027,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.061531362,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061531362,"stop_loss_ratio":0.1,"min_rate":0.06836818,"max_rate":0.06871087764411027,"is_open":false,"open_timestamp":1516185000000.0,"close_timestamp":1516188300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":27.548209366391184,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 11:10:00+00:00","open_rate":3.63e-05,"close_rate":3.648195488721804e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.2670000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2670000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.63e-05,"max_rate":3.648195488721804e-05,"is_open":false,"open_timestamp":1516185000000.0,"close_timestamp":1516187400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03558718861209965,"open_date":"2018-01-17 12:30:00+00:00","close_date":"2018-01-17 
22:05:00+00:00","open_rate":0.0281,"close_rate":0.02824085213032581,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":575,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.02529,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.02529,"stop_loss_ratio":0.1,"min_rate":0.0281,"max_rate":0.02824085213032581,"is_open":false,"open_timestamp":1516192200000.0,"close_timestamp":1516226700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011559355963546878,"open_date":"2018-01-17 12:35:00+00:00","close_date":"2018-01-17 16:55:00+00:00","open_rate":0.08651001,"close_rate":0.08694364413533832,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.077859009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077859009,"stop_loss_ratio":0.1,"min_rate":0.08651001,"max_rate":0.08694364413533832,"is_open":false,"open_timestamp":1516192500000.0,"close_timestamp":1516208100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.752529735487308,"open_date":"2018-01-18 05:00:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":5.633e-05,"close_rate":5.6612355889724306e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0697e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0697e-05,"stop_loss_ratio":0.1,"min_rate":5.633e-05,"max_rate":5.6612355889724306e-05,"is_open":false,"open_timestamp":1516251600000.0,"close_timestamp":1516254900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01430923457900944,"open_date":"2018-01-18 05:20:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":0.06988494,"close_rate":0.07093584135338346,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.06289644600000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06289644600000001,"stop_loss_ratio":0.1,"min_rate":0.06988494,"max_rate":0.07093584135338346,"is_open":false,"open_timestamp":1516252800000.0,"close_timestamp":1516254900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.034265103697024,"open_date":"2018-01-18 07:35:00+00:00","close_date":"2018-01-18 08:15:00+00:00","open_rate":5.545e-05,"close_rate":5.572794486215538e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.9905e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.9905e-05,"stop_loss_ratio":0.1,"min_rate":5.545e-05,"max_rate":5.572794486215538e-05,"is_open":false,"open_timestamp":1516260900000.0,"close_timestamp":1516263300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06121723118136401,"open_date":"2018-01-18 09:00:00+00:00","close_date":"2018-01-18 
09:40:00+00:00","open_rate":0.01633527,"close_rate":0.016417151052631574,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014701743,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014701743,"stop_loss_ratio":0.1,"min_rate":0.01633527,"max_rate":0.016417151052631574,"is_open":false,"open_timestamp":1516266000000.0,"close_timestamp":1516268400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3707356136045141,"open_date":"2018-01-18 16:40:00+00:00","close_date":"2018-01-18 17:20:00+00:00","open_rate":0.00269734,"close_rate":0.002710860501253133,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002427606,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002427606,"stop_loss_ratio":0.1,"min_rate":0.00269734,"max_rate":0.002710860501253133,"is_open":false,"open_timestamp":1516293600000.0,"close_timestamp":1516296000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":22.3463687150838,"open_date":"2018-01-18 18:05:00+00:00","close_date":"2018-01-18 18:30:00+00:00","open_rate":4.475e-05,"close_rate":4.587155388471177e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":4.0275e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.0275e-05,"stop_loss_ratio":0.1,"min_rate":4.475e-05,"max_rate":4.587155388471177e-05,"is_open":false,"open_timestamp":1516298700000.0,"close_timestamp":1516300200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":35.842293906810035,"open_date":"2018-01-18 18:25:00+00:00","close_date":"2018-01-18 18:55:00+00:00","open_rate":2.79e-05,"close_rate":2.8319548872180444e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":2.511e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.511e-05,"stop_loss_ratio":0.1,"min_rate":2.79e-05,"max_rate":2.8319548872180444e-05,"is_open":false,"open_timestamp":1516299900000.0,"close_timestamp":1516301700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.022525942001105578,"open_date":"2018-01-18 20:10:00+00:00","close_date":"2018-01-18 20:50:00+00:00","open_rate":0.04439326,"close_rate":0.04461578260651629,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.039953934,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039953934,"stop_loss_ratio":0.1,"min_rate":0.04439326,"max_rate":0.04461578260651629,"is_open":false,"open_timestamp":1516306200000.0,"close_timestamp":1516308600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":22.271714922048996,"open_date":"2018-01-18 21:30:00+00:00","close_date":"2018-01-19 
00:35:00+00:00","open_rate":4.49e-05,"close_rate":4.51250626566416e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":185,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.041e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.041e-05,"stop_loss_ratio":0.1,"min_rate":4.49e-05,"max_rate":4.51250626566416e-05,"is_open":false,"open_timestamp":1516311000000.0,"close_timestamp":1516322100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03502626970227671,"open_date":"2018-01-18 21:55:00+00:00","close_date":"2018-01-19 05:05:00+00:00","open_rate":0.02855,"close_rate":0.028693107769423555,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":430,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025695,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025695,"stop_loss_ratio":0.1,"min_rate":0.02855,"max_rate":0.028693107769423555,"is_open":false,"open_timestamp":1516312500000.0,"close_timestamp":1516338300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.25327812284334,"open_date":"2018-01-18 22:10:00+00:00","close_date":"2018-01-18 22:50:00+00:00","open_rate":5.796e-05,"close_rate":5.8250526315789473e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.2164e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2164e-05,"stop_loss_ratio":0.1,"min_rate":5.796e-05,"max_rate":5.8250526315789473e-05,"is_open":false,"open_timestamp":1516313400000.0,"close_timestamp":1516315800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02303975994413319,"open_date":"2018-01-18 23:50:00+00:00","close_date":"2018-01-19 00:30:00+00:00","open_rate":0.04340323,"close_rate":0.04362079005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.039062907,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039062907,"stop_loss_ratio":0.1,"min_rate":0.04340323,"max_rate":0.04362079005012531,"is_open":false,"open_timestamp":1516319400000.0,"close_timestamp":1516321800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02244943545282195,"open_date":"2018-01-19 16:45:00+00:00","close_date":"2018-01-19 17:35:00+00:00","open_rate":0.04454455,"close_rate":0.04476783095238095,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.040090095000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040090095000000006,"stop_loss_ratio":0.1,"min_rate":0.04454455,"max_rate":0.04476783095238095,"is_open":false,"open_timestamp":1516380300000.0,"close_timestamp":1516383300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.793594306049823,"open_date":"2018-01-19 17:15:00+00:00","close_date":"2018-01-19 
19:55:00+00:00","open_rate":5.62e-05,"close_rate":5.648170426065162e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":160,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.058e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.058e-05,"stop_loss_ratio":0.1,"min_rate":5.62e-05,"max_rate":5.648170426065162e-05,"is_open":false,"open_timestamp":1516382100000.0,"close_timestamp":1516391700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.04678497349619,"open_date":"2018-01-19 17:20:00+00:00","close_date":"2018-01-19 20:15:00+00:00","open_rate":4.339e-05,"close_rate":4.360749373433584e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.9051e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.9051e-05,"stop_loss_ratio":0.1,"min_rate":4.339e-05,"max_rate":4.360749373433584e-05,"is_open":false,"open_timestamp":1516382400000.0,"close_timestamp":1516392900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":9.910802775024775,"open_date":"2018-01-20 04:45:00+00:00","close_date":"2018-01-20 17:35:00+00:00","open_rate":0.0001009,"close_rate":0.00010140576441102755,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":770,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":9.081e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.081e-05,"stop_loss_ratio":0.1,"min_rate":0.0001009,"max_rate":0.00010140576441102755,"is_open":false,"open_timestamp":1516423500000.0,"close_timestamp":1516469700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3696789338459548,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 15:15:00+00:00","open_rate":0.00270505,"close_rate":0.002718609147869674,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":625,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002434545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002434545,"stop_loss_ratio":0.1,"min_rate":0.00270505,"max_rate":0.002718609147869674,"is_open":false,"open_timestamp":1516423800000.0,"close_timestamp":1516461300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.033333311111125925,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 07:00:00+00:00","open_rate":0.03000002,"close_rate":0.030150396040100245,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.027000018,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027000018,"stop_loss_ratio":0.1,"min_rate":0.03000002,"max_rate":0.030150396040100245,"is_open":false,"open_timestamp":1516423800000.0,"close_timestamp":1516431600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.315018315018317,"open_date":"2018-01-20 09:00:00+00:00","close_date":"2018-01-20 
09:40:00+00:00","open_rate":5.46e-05,"close_rate":5.4873684210526304e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.914e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.914e-05,"stop_loss_ratio":0.1,"min_rate":5.46e-05,"max_rate":5.4873684210526304e-05,"is_open":false,"open_timestamp":1516438800000.0,"close_timestamp":1516441200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03244412634781012,"open_date":"2018-01-20 18:25:00+00:00","close_date":"2018-01-25 03:50:00+00:00","open_rate":0.03082222,"close_rate":0.027739998,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":6325,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000015,"exit_reason":"stop_loss","initial_stop_loss_abs":0.027739998,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027739998,"stop_loss_ratio":0.1,"min_rate":0.027739998,"max_rate":0.03082222,"is_open":false,"open_timestamp":1516472700000.0,"close_timestamp":1516852200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011148273260677063,"open_date":"2018-01-20 22:25:00+00:00","close_date":"2018-01-20 23:15:00+00:00","open_rate":0.08969999,"close_rate":0.09014961401002504,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.080729991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080729991,"stop_loss_ratio":0.1,"min_rate":0.08969999,"max_rate":0.09014961401002504,"is_open":false,"open_timestamp":1516487100000.0,"close_timestamp":1516490100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06125570520324337,"open_date":"2018-01-21 02:50:00+00:00","close_date":"2018-01-21 14:30:00+00:00","open_rate":0.01632501,"close_rate":0.01640683962406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":700,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014692509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014692509,"stop_loss_ratio":0.1,"min_rate":0.01632501,"max_rate":0.01640683962406015,"is_open":false,"open_timestamp":1516503000000.0,"close_timestamp":1516545000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.01417675579120474,"open_date":"2018-01-21 10:20:00+00:00","close_date":"2018-01-21 11:00:00+00:00","open_rate":0.070538,"close_rate":0.07089157393483708,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0634842,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0634842,"stop_loss_ratio":0.1,"min_rate":0.070538,"max_rate":0.07089157393483708,"is_open":false,"open_timestamp":1516530000000.0,"close_timestamp":1516532400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.864365214110546,"open_date":"2018-01-21 15:50:00+00:00","close_date":"2018-01-21 
18:45:00+00:00","open_rate":5.301e-05,"close_rate":5.327571428571427e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7709e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7709e-05,"stop_loss_ratio":0.1,"min_rate":5.301e-05,"max_rate":5.327571428571427e-05,"is_open":false,"open_timestamp":1516549800000.0,"close_timestamp":1516560300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.284450063211125,"open_date":"2018-01-21 16:20:00+00:00","close_date":"2018-01-21 17:00:00+00:00","open_rate":3.955e-05,"close_rate":3.9748245614035085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.5595e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5595e-05,"stop_loss_ratio":0.1,"min_rate":3.955e-05,"max_rate":3.9748245614035085e-05,"is_open":false,"open_timestamp":1516551600000.0,"close_timestamp":1516554000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38683971296493297,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:45:00+00:00","open_rate":0.00258505,"close_rate":0.002623922932330827,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002326545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326545,"stop_loss_ratio":0.1,"min_rate":0.00258505,"max_rate":0.002623922932330827,"is_open":false,"open_timestamp":1516569300000.0,"close_timestamp":1516571100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":25.621316935690498,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:55:00+00:00","open_rate":3.903e-05,"close_rate":3.922563909774435e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":3.5127e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5127e-05,"stop_loss_ratio":0.1,"min_rate":3.903e-05,"max_rate":3.922563909774435e-05,"is_open":false,"open_timestamp":1516569300000.0,"close_timestamp":1516571700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.098548510313215,"open_date":"2018-01-22 00:35:00+00:00","close_date":"2018-01-22 10:35:00+00:00","open_rate":5.236e-05,"close_rate":5.262245614035087e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":600,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7124e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7124e-05,"stop_loss_ratio":0.1,"min_rate":5.236e-05,"max_rate":5.262245614035087e-05,"is_open":false,"open_timestamp":1516581300000.0,"close_timestamp":1516617300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.076650420912715,"open_date":"2018-01-22 01:30:00+00:00","close_date":"2018-01-22 
02:10:00+00:00","open_rate":9.028e-05,"close_rate":9.07325313283208e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":8.1252e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.1252e-05,"stop_loss_ratio":0.1,"min_rate":9.028e-05,"max_rate":9.07325313283208e-05,"is_open":false,"open_timestamp":1516584600000.0,"close_timestamp":1516587000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3721622627465575,"open_date":"2018-01-22 12:25:00+00:00","close_date":"2018-01-22 14:35:00+00:00","open_rate":0.002687,"close_rate":0.002700468671679198,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0024183000000000004,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0024183000000000004,"stop_loss_ratio":0.1,"min_rate":0.002687,"max_rate":0.002700468671679198,"is_open":false,"open_timestamp":1516623900000.0,"close_timestamp":1516631700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.99232245681382,"open_date":"2018-01-22 13:15:00+00:00","close_date":"2018-01-22 13:55:00+00:00","open_rate":4.168e-05,"close_rate":4.188892230576441e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.7512e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.7512e-05,"stop_loss_ratio":0.1,"min_rate":4.168e-05,"max_rate":4.188892230576441e-05,"is_open":false,"open_timestamp":1516626900000.0,"close_timestamp":1516629300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.336583153837434,"open_date":"2018-01-22 14:00:00+00:00","close_date":"2018-01-22 14:30:00+00:00","open_rate":8.821e-05,"close_rate":8.953646616541353e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":7.9389e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.9389e-05,"stop_loss_ratio":0.1,"min_rate":8.821e-05,"max_rate":8.953646616541353e-05,"is_open":false,"open_timestamp":1516629600000.0,"close_timestamp":1516631400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.334880123743233,"open_date":"2018-01-22 15:55:00+00:00","close_date":"2018-01-22 16:40:00+00:00","open_rate":5.172e-05,"close_rate":5.1979248120300745e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6548e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6548e-05,"stop_loss_ratio":0.1,"min_rate":5.172e-05,"max_rate":5.1979248120300745e-05,"is_open":false,"open_timestamp":1516636500000.0,"close_timestamp":1516639200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":33.04692663582287,"open_date":"2018-01-22 16:05:00+00:00","close_date":"2018-01-22 
16:25:00+00:00","open_rate":3.026e-05,"close_rate":3.101839598997494e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":2.7234e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7234e-05,"stop_loss_ratio":0.1,"min_rate":3.026e-05,"max_rate":3.101839598997494e-05,"is_open":false,"open_timestamp":1516637100000.0,"close_timestamp":1516638300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014156285390713478,"open_date":"2018-01-22 19:50:00+00:00","close_date":"2018-01-23 00:10:00+00:00","open_rate":0.07064,"close_rate":0.07099408521303258,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.063576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.063576,"stop_loss_ratio":0.1,"min_rate":0.07064,"max_rate":0.07099408521303258,"is_open":false,"open_timestamp":1516650600000.0,"close_timestamp":1516666200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.06080938507725528,"open_date":"2018-01-22 21:25:00+00:00","close_date":"2018-01-22 22:05:00+00:00","open_rate":0.01644483,"close_rate":0.01652726022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014800347,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014800347,"stop_loss_ratio":0.1,"min_rate":0.01644483,"max_rate":0.01652726022556391,"is_open":false,"open_timestamp":1516656300000.0,"close_timestamp":1516658700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":23.08935580697299,"open_date":"2018-01-23 00:05:00+00:00","close_date":"2018-01-23 00:35:00+00:00","open_rate":4.331e-05,"close_rate":4.3961278195488714e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":3.8979e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.8979e-05,"stop_loss_ratio":0.1,"min_rate":4.331e-05,"max_rate":4.3961278195488714e-05,"is_open":false,"open_timestamp":1516665900000.0,"close_timestamp":1516667700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.250000000000004,"open_date":"2018-01-23 01:50:00+00:00","close_date":"2018-01-23 02:15:00+00:00","open_rate":3.2e-05,"close_rate":3.2802005012531326e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":2.88e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.88e-05,"stop_loss_ratio":0.1,"min_rate":3.2e-05,"max_rate":3.2802005012531326e-05,"is_open":false,"open_timestamp":1516672200000.0,"close_timestamp":1516673700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010907854156754156,"open_date":"2018-01-23 04:25:00+00:00","close_date":"2018-01-23 
05:15:00+00:00","open_rate":0.09167706,"close_rate":0.09213659413533835,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08250935400000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08250935400000001,"stop_loss_ratio":0.1,"min_rate":0.09167706,"max_rate":0.09213659413533835,"is_open":false,"open_timestamp":1516681500000.0,"close_timestamp":1516684500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014440474918339117,"open_date":"2018-01-23 07:35:00+00:00","close_date":"2018-01-23 09:00:00+00:00","open_rate":0.0692498,"close_rate":0.06959691679197995,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06232482,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06232482,"stop_loss_ratio":0.1,"min_rate":0.0692498,"max_rate":0.06959691679197995,"is_open":false,"open_timestamp":1516692900000.0,"close_timestamp":1516698000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.426775612822127,"open_date":"2018-01-23 10:50:00+00:00","close_date":"2018-01-23 13:05:00+00:00","open_rate":3.182e-05,"close_rate":3.197949874686716e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":135,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.8638e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8638e-05,"stop_loss_ratio":0.1,"min_rate":3.182e-05,"max_rate":3.197949874686716e-05,"is_open":false,"open_timestamp":1516704600000.0,"close_timestamp":1516712700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024461839530332683,"open_date":"2018-01-23 11:05:00+00:00","close_date":"2018-01-23 16:05:00+00:00","open_rate":0.04088,"close_rate":0.04108491228070175,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":300,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036792,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036792,"stop_loss_ratio":0.1,"min_rate":0.04088,"max_rate":0.04108491228070175,"is_open":false,"open_timestamp":1516705500000.0,"close_timestamp":1516723500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.417475728155345,"open_date":"2018-01-23 14:55:00+00:00","close_date":"2018-01-23 15:35:00+00:00","open_rate":5.15e-05,"close_rate":5.175814536340851e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.635e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.635e-05,"stop_loss_ratio":0.1,"min_rate":5.15e-05,"max_rate":5.175814536340851e-05,"is_open":false,"open_timestamp":1516719300000.0,"close_timestamp":1516721700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.011023294646713328,"open_date":"2018-01-23 16:35:00+00:00","close_date":"2018-01-24 
00:05:00+00:00","open_rate":0.09071698,"close_rate":0.09117170170426064,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":450,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.081645282,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.081645282,"stop_loss_ratio":0.1,"min_rate":0.09071698,"max_rate":0.09117170170426064,"is_open":false,"open_timestamp":1516725300000.0,"close_timestamp":1516752300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":31.969309462915604,"open_date":"2018-01-23 17:25:00+00:00","close_date":"2018-01-23 18:45:00+00:00","open_rate":3.128e-05,"close_rate":3.1436791979949865e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.8152e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8152e-05,"stop_loss_ratio":0.1,"min_rate":3.128e-05,"max_rate":3.1436791979949865e-05,"is_open":false,"open_timestamp":1516728300000.0,"close_timestamp":1516733100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.465724751439035,"open_date":"2018-01-23 20:15:00+00:00","close_date":"2018-01-23 22:00:00+00:00","open_rate":9.555e-05,"close_rate":9.602894736842104e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":8.5995e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.5995e-05,"stop_loss_ratio":0.1,"min_rate":9.555e-05,"max_rate":9.602894736842104e-05,"is_open":false,"open_timestamp":1516738500000.0,"close_timestamp":1516744800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02450979791426522,"open_date":"2018-01-23 22:30:00+00:00","close_date":"2018-01-23 23:10:00+00:00","open_rate":0.04080001,"close_rate":0.0410045213283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036720009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036720009,"stop_loss_ratio":0.1,"min_rate":0.04080001,"max_rate":0.0410045213283208,"is_open":false,"open_timestamp":1516746600000.0,"close_timestamp":1516749000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.36858415649816,"open_date":"2018-01-23 23:50:00+00:00","close_date":"2018-01-24 03:35:00+00:00","open_rate":5.163e-05,"close_rate":5.18887969924812e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":225,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6467e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6467e-05,"stop_loss_ratio":0.1,"min_rate":5.163e-05,"max_rate":5.18887969924812e-05,"is_open":false,"open_timestamp":1516751400000.0,"close_timestamp":1516764900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024747691102289384,"open_date":"2018-01-24 00:20:00+00:00","close_date":"2018-01-24 
01:50:00+00:00","open_rate":0.04040781,"close_rate":0.04061035541353383,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036367029,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036367029,"stop_loss_ratio":0.1,"min_rate":0.04040781,"max_rate":0.04061035541353383,"is_open":false,"open_timestamp":1516753200000.0,"close_timestamp":1516758600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.485580670303975,"open_date":"2018-01-24 06:45:00+00:00","close_date":"2018-01-24 07:25:00+00:00","open_rate":5.132e-05,"close_rate":5.157724310776942e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6188000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6188000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.132e-05,"max_rate":5.157724310776942e-05,"is_open":false,"open_timestamp":1516776300000.0,"close_timestamp":1516778700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":19.23816852635629,"open_date":"2018-01-24 14:15:00+00:00","close_date":"2018-01-24 14:25:00+00:00","open_rate":5.198e-05,"close_rate":5.432496240601503e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":4.6782e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6782e-05,"stop_loss_ratio":0.1,"min_rate":5.198e-05,"max_rate":5.432496240601503e-05,"is_open":false,"open_timestamp":1516803300000.0,"close_timestamp":1516803900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":32.74394237066143,"open_date":"2018-01-24 14:50:00+00:00","close_date":"2018-01-24 16:35:00+00:00","open_rate":3.054e-05,"close_rate":3.069308270676692e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7486000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7486000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.054e-05,"max_rate":3.069308270676692e-05,"is_open":false,"open_timestamp":1516805400000.0,"close_timestamp":1516811700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":10.795638562020944,"open_date":"2018-01-24 15:10:00+00:00","close_date":"2018-01-24 16:15:00+00:00","open_rate":9.263e-05,"close_rate":9.309431077694236e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":8.3367e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.3367e-05,"stop_loss_ratio":0.1,"min_rate":9.263e-05,"max_rate":9.309431077694236e-05,"is_open":false,"open_timestamp":1516806600000.0,"close_timestamp":1516810500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":18.13565469713457,"open_date":"2018-01-24 22:40:00+00:00","close_date":"2018-01-24 
23:25:00+00:00","open_rate":5.514e-05,"close_rate":5.54163909774436e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.962599999999999e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.962599999999999e-05,"stop_loss_ratio":0.1,"min_rate":5.514e-05,"max_rate":5.54163909774436e-05,"is_open":false,"open_timestamp":1516833600000.0,"close_timestamp":1516836300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":20.3210729526519,"open_date":"2018-01-25 00:50:00+00:00","close_date":"2018-01-25 01:30:00+00:00","open_rate":4.921e-05,"close_rate":4.9456666666666664e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.4289e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4289e-05,"stop_loss_ratio":0.1,"min_rate":4.921e-05,"max_rate":4.9456666666666664e-05,"is_open":false,"open_timestamp":1516841400000.0,"close_timestamp":1516843800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38461538461538464,"open_date":"2018-01-25 08:15:00+00:00","close_date":"2018-01-25 12:15:00+00:00","open_rate":0.0026,"close_rate":0.002613032581453634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":240,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.00234,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00234,"stop_loss_ratio":0.1,"min_rate":0.0026,"max_rate":0.002613032581453634,"is_open":false,"open_timestamp":1516868100000.0,"close_timestamp":1516882500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03571593119825878,"open_date":"2018-01-25 10:25:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":0.02799871,"close_rate":0.028139054411027563,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":350,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025198839,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025198839,"stop_loss_ratio":0.1,"min_rate":0.02799871,"max_rate":0.028139054411027563,"is_open":false,"open_timestamp":1516875900000.0,"close_timestamp":1516896900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024516401717913305,"open_date":"2018-01-25 11:00:00+00:00","close_date":"2018-01-25 11:45:00+00:00","open_rate":0.04078902,"close_rate":0.0409934762406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036710118,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036710118,"stop_loss_ratio":0.1,"min_rate":0.04078902,"max_rate":0.0409934762406015,"is_open":false,"open_timestamp":1516878000000.0,"close_timestamp":1516880700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"amount":34.602076124567475,"open_date":"2018-01-25 13:05:00+00:00","close_date":"2018-01-25 
13:45:00+00:00","open_rate":2.89e-05,"close_rate":2.904486215538847e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.601e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.601e-05,"stop_loss_ratio":0.1,"min_rate":2.89e-05,"max_rate":2.904486215538847e-05,"is_open":false,"open_timestamp":1516885500000.0,"close_timestamp":1516887900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02432912439481303,"open_date":"2018-01-25 13:20:00+00:00","close_date":"2018-01-25 14:05:00+00:00","open_rate":0.041103,"close_rate":0.04130903007518797,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0369927,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0369927,"stop_loss_ratio":0.1,"min_rate":0.041103,"max_rate":0.04130903007518797,"is_open":false,"open_timestamp":1516886400000.0,"close_timestamp":1516889100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.42299189388357,"open_date":"2018-01-25 15:45:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":5.428e-05,"close_rate":5.509624060150376e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.8852000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8852000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.428e-05,"max_rate":5.509624060150376e-05,"is_open":false,"open_timestamp":1516895100000.0,"close_timestamp":1516896900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.47063169560399,"open_date":"2018-01-25 17:45:00+00:00","close_date":"2018-01-25 23:15:00+00:00","open_rate":5.414e-05,"close_rate":5.441137844611528e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":330,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.8726e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8726e-05,"stop_loss_ratio":0.1,"min_rate":5.414e-05,"max_rate":5.441137844611528e-05,"is_open":false,"open_timestamp":1516902300000.0,"close_timestamp":1516922100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02415005686130888,"open_date":"2018-01-25 21:15:00+00:00","close_date":"2018-01-25 21:55:00+00:00","open_rate":0.04140777,"close_rate":0.0416153277443609,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.037266993000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.037266993000000005,"stop_loss_ratio":0.1,"min_rate":0.04140777,"max_rate":0.0416153277443609,"is_open":false,"open_timestamp":1516914900000.0,"close_timestamp":1516917300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3932224183965176,"open_date":"2018-01-26 02:05:00+00:00","close_date":"2018-01-26 
02:45:00+00:00","open_rate":0.00254309,"close_rate":0.002555837318295739,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002288781,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002288781,"stop_loss_ratio":0.1,"min_rate":0.00254309,"max_rate":0.002555837318295739,"is_open":false,"open_timestamp":1516932300000.0,"close_timestamp":1516934700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.834849295523455,"open_date":"2018-01-26 02:55:00+00:00","close_date":"2018-01-26 15:10:00+00:00","open_rate":5.607e-05,"close_rate":5.6351052631578935e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":735,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0463e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0463e-05,"stop_loss_ratio":0.1,"min_rate":5.607e-05,"max_rate":5.6351052631578935e-05,"is_open":false,"open_timestamp":1516935300000.0,"close_timestamp":1516979400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.39400171784748983,"open_date":"2018-01-26 06:10:00+00:00","close_date":"2018-01-26 09:25:00+00:00","open_rate":0.00253806,"close_rate":0.0025507821052631577,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":195,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002284254,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002284254,"stop_loss_ratio":0.1,"min_rate":0.00253806,"max_rate":0.0025507821052631577,"is_open":false,"open_timestamp":1516947000000.0,"close_timestamp":1516958700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.024096385542168672,"open_date":"2018-01-26 07:25:00+00:00","close_date":"2018-01-26 09:55:00+00:00","open_rate":0.0415,"close_rate":0.04170802005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":150,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.03735,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03735,"stop_loss_ratio":0.1,"min_rate":0.0415,"max_rate":0.04170802005012531,"is_open":false,"open_timestamp":1516951500000.0,"close_timestamp":1516960500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":18.793459875963165,"open_date":"2018-01-26 09:55:00+00:00","close_date":"2018-01-26 10:25:00+00:00","open_rate":5.321e-05,"close_rate":5.401015037593984e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":4.7889e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7889e-05,"stop_loss_ratio":0.1,"min_rate":5.321e-05,"max_rate":5.401015037593984e-05,"is_open":false,"open_timestamp":1516960500000.0,"close_timestamp":1516962300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.036074437437185386,"open_date":"2018-01-26 16:05:00+00:00","close_date":"2018-01-26 
16:45:00+00:00","open_rate":0.02772046,"close_rate":0.02785940967418546,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.024948414,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024948414,"stop_loss_ratio":0.1,"min_rate":0.02772046,"max_rate":0.02785940967418546,"is_open":false,"open_timestamp":1516982700000.0,"close_timestamp":1516985100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010569326272036914,"open_date":"2018-01-26 23:35:00+00:00","close_date":"2018-01-27 00:15:00+00:00","open_rate":0.09461341,"close_rate":0.09508766268170424,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.085152069,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085152069,"stop_loss_ratio":0.1,"min_rate":0.09461341,"max_rate":0.09508766268170424,"is_open":false,"open_timestamp":1517009700000.0,"close_timestamp":1517012100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":17.809439002671414,"open_date":"2018-01-27 00:35:00+00:00","close_date":"2018-01-27 01:30:00+00:00","open_rate":5.615e-05,"close_rate":5.643145363408521e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0535e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0535e-05,"stop_loss_ratio":0.1,"min_rate":5.615e-05,"max_rate":5.643145363408521e-05,"is_open":false,"open_timestamp":1517013300000.0,"close_timestamp":1517016600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"amount":17.998560115190784,"open_date":"2018-01-27 00:45:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":5.556e-05,"close_rate":5.144e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4560,"profit_ratio":-0.07877175,"profit_abs":-7.415406767458598e-05,"exit_reason":"force_exit","initial_stop_loss_abs":5.0004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0004e-05,"stop_loss_ratio":0.1,"min_rate":5.144e-05,"max_rate":5.556e-05,"is_open":false,"open_timestamp":1517013900000.0,"close_timestamp":1517287500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014492751522789634,"open_date":"2018-01-27 02:30:00+00:00","close_date":"2018-01-27 11:25:00+00:00","open_rate":0.06900001,"close_rate":0.06934587471177944,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":535,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.062100009000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.062100009000000005,"stop_loss_ratio":0.1,"min_rate":0.06900001,"max_rate":0.06934587471177944,"is_open":false,"open_timestamp":1517020200000.0,"close_timestamp":1517052300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010582027378879436,"open_date":"2018-01-27 06:25:00+00:00","close_date":"2018-01-27 
07:05:00+00:00","open_rate":0.09449985,"close_rate":0.0949735334586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.085049865,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085049865,"stop_loss_ratio":0.1,"min_rate":0.09449985,"max_rate":0.0949735334586466,"is_open":false,"open_timestamp":1517034300000.0,"close_timestamp":1517036700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"amount":0.02434885085598385,"open_date":"2018-01-27 09:40:00+00:00","close_date":"2018-01-30 04:40:00+00:00","open_rate":0.0410697,"close_rate":0.03928809,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4020,"profit_ratio":-0.04815133,"profit_abs":-4.338015617352949e-05,"exit_reason":"force_exit","initial_stop_loss_abs":0.03696273,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03696273,"stop_loss_ratio":0.1,"min_rate":0.03928809,"max_rate":0.0410697,"is_open":false,"open_timestamp":1517046000000.0,"close_timestamp":1517287200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.03508771929824561,"open_date":"2018-01-27 11:45:00+00:00","close_date":"2018-01-27 12:30:00+00:00","open_rate":0.0285,"close_rate":0.02864285714285714,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025650000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025650000000000003,"stop_loss_ratio":0.1,"min_rate":0.0285,"max_rate":0.02864285714285714,"is_open":false,"open_timestamp":1517053500000.0,"close_timestamp":1517056200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"amount":0.034887307020861215,"open_date":"2018-01-27 12:35:00+00:00","close_date":"2018-01-27 15:25:00+00:00","open_rate":0.02866372,"close_rate":0.02880739779448621,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":170,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025797348,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025797348,"stop_loss_ratio":0.1,"min_rate":0.02866372,"max_rate":0.02880739779448621,"is_open":false,"open_timestamp":1517056500000.0,"close_timestamp":1517066700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"amount":0.010484268355332824,"open_date":"2018-01-27 15:50:00+00:00","close_date":"2018-01-27 16:50:00+00:00","open_rate":0.095381,"close_rate":0.09585910025062656,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0858429,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0858429,"stop_loss_ratio":0.1,"min_rate":0.095381,"max_rate":0.09585910025062656,"is_open":false,"open_timestamp":1517068200000.0,"close_timestamp":1517071800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014794886650455415,"open_date":"2018-01-27 17:05:00+00:00","close_date":"2018-01-27 
17:45:00+00:00","open_rate":0.06759092,"close_rate":0.06792972160401002,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060831828,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060831828,"stop_loss_ratio":0.1,"min_rate":0.06759092,"max_rate":0.06792972160401002,"is_open":false,"open_timestamp":1517072700000.0,"close_timestamp":1517075100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.38684569885609726,"open_date":"2018-01-27 23:40:00+00:00","close_date":"2018-01-28 01:05:00+00:00","open_rate":0.00258501,"close_rate":0.002597967443609022,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002326509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326509,"stop_loss_ratio":0.1,"min_rate":0.00258501,"max_rate":0.002597967443609022,"is_open":false,"open_timestamp":1517096400000.0,"close_timestamp":1517101500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014928710926711672,"open_date":"2018-01-28 02:25:00+00:00","close_date":"2018-01-28 08:10:00+00:00","open_rate":0.06698502,"close_rate":0.0673207845112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":345,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060286518,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060286518,"stop_loss_ratio":0.1,"min_rate":0.06698502,"max_rate":0.0673207845112782,"is_open":false,"open_timestamp":1517106300000.0,"close_timestamp":1517127000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014767187899175548,"open_date":"2018-01-28 10:25:00+00:00","close_date":"2018-01-28 16:30:00+00:00","open_rate":0.0677177,"close_rate":0.06805713709273183,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":365,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06094593000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06094593000000001,"stop_loss_ratio":0.1,"min_rate":0.0677177,"max_rate":0.06805713709273183,"is_open":false,"open_timestamp":1517135100000.0,"close_timestamp":1517157000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"amount":19.175455417066157,"open_date":"2018-01-28 20:35:00+00:00","close_date":"2018-01-28 21:35:00+00:00","open_rate":5.215e-05,"close_rate":5.2411403508771925e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6935e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6935e-05,"stop_loss_ratio":0.1,"min_rate":5.215e-05,"max_rate":5.2411403508771925e-05,"is_open":false,"open_timestamp":1517171700000.0,"close_timestamp":1517175300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.36521808998243305,"open_date":"2018-01-28 22:00:00+00:00","close_date":"2018-01-28 
22:30:00+00:00","open_rate":0.00273809,"close_rate":0.002779264285714285,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002464281,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002464281,"stop_loss_ratio":0.1,"min_rate":0.00273809,"max_rate":0.002779264285714285,"is_open":false,"open_timestamp":1517176800000.0,"close_timestamp":1517178600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"amount":0.3641236272539253,"open_date":"2018-01-29 00:00:00+00:00","close_date":"2018-01-29 00:30:00+00:00","open_rate":0.00274632,"close_rate":0.002787618045112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002471688,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002471688,"stop_loss_ratio":0.1,"min_rate":0.00274632,"max_rate":0.002787618045112782,"is_open":false,"open_timestamp":1517184000000.0,"close_timestamp":1517185800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"amount":0.061634117689115045,"open_date":"2018-01-29 02:15:00+00:00","close_date":"2018-01-29 03:00:00+00:00","open_rate":0.01622478,"close_rate":0.016306107218045113,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014602302,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014602302,"stop_loss_ratio":0.1,"min_rate":0.01622478,"max_rate":0.016306107218045113,"is_open":false,"open_timestamp":1517192100000.0,"close_timestamp":1517194800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014492753623188404,"open_date":"2018-01-29 03:05:00+00:00","close_date":"2018-01-29 03:45:00+00:00","open_rate":0.069,"close_rate":0.06934586466165413,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06210000000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06210000000000001,"stop_loss_ratio":0.1,"min_rate":0.069,"max_rate":0.06934586466165413,"is_open":false,"open_timestamp":1517195100000.0,"close_timestamp":1517197500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.42204454597373,"open_date":"2018-01-29 05:20:00+00:00","close_date":"2018-01-29 06:55:00+00:00","open_rate":8.755e-05,"close_rate":8.798884711779448e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":95,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":7.879500000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.879500000000001e-05,"stop_loss_ratio":0.1,"min_rate":8.755e-05,"max_rate":8.798884711779448e-05,"is_open":false,"open_timestamp":1517203200000.0,"close_timestamp":1517208900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014650376815016871,"open_date":"2018-01-29 07:00:00+00:00","close_date":"2018-01-29 
19:25:00+00:00","open_rate":0.06825763,"close_rate":0.06859977350877192,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":745,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.061431867,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061431867,"stop_loss_ratio":0.1,"min_rate":0.06825763,"max_rate":0.06859977350877192,"is_open":false,"open_timestamp":1517209200000.0,"close_timestamp":1517253900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"amount":0.014894490408841846,"open_date":"2018-01-29 19:45:00+00:00","close_date":"2018-01-29 20:25:00+00:00","open_rate":0.06713892,"close_rate":0.06747545593984962,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060425028000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060425028000000006,"stop_loss_ratio":0.1,"min_rate":0.06713892,"max_rate":0.06747545593984962,"is_open":false,"open_timestamp":1517255100000.0,"close_timestamp":1517257500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"amount":11.193194537721066,"open_date":"2018-01-29 23:30:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":8.934e-05,"close_rate":8.8e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":315,"profit_ratio":-0.0199116,"profit_abs":-1.4998880680546292e-05,"exit_reason":"force_exit","initial_stop_loss_abs":8.0406e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.0406e-05,"stop_loss_ratio":0.1,"min_rate":8.8e-05,"max_rate":8.934e-05,"is_open":false,"open_timestamp":1517268600000.0,"close_timestamp":1517287500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null}],"locks":[],"best_pair":{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.00010025062656641558,"profit_total":0.010025062656641558,"profit_total_pct":1.0,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},"worst_pair":{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":3.533834586465928e-05,"profit_total":0.003533834586465928,"profit_total_pct":0.35,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},"results_per_pair":[{"key":"XLM/BTC","trades":21,"profit_mean":0.0026243899999999994,"profit_mean_pct":0.2624389999999999,"profit_sum":0.05511218999999999,"profit_sum_pct":5.51,"profit_total_abs":0.00016065162907268006,"profit_total":0.016065162907268005,"profit_total_pct":1.61,"duration_avg":"3:21:00","wins":20,"draws":0,"losses":1},{"key":"ETC/BTC","trades":20,"profit_mean":0.0022568569999999997,"profit_mean_pct":0.22568569999999996,"profit_sum":0.04513713999999999,"profit_sum_pct":4.51,"profit_total_abs":0.00014561403508771753,"profit_total":0.014561403508771753,"profit_total_pct":1.46,"duration_avg":"1:45:00","wins":19,"draws":0,"losses":1},{"key":"ETH/BTC","trades":21,"profit_mean":0.0009500057142857142,"profit_mean_pct":0.09500057142857142,"profit_sum":0.01995012,"profit_sum_pct":2.0,"profit_total_abs":0.00012531328320801774,"profit_total":0.012531328320801774,"profit_total_pct":1.25,"duration_avg":"2:17:00","wins":21,"draws":0,"losses":0},{"key":"ADA/BTC","trades":29,"profit_mean":-0.0011598141379310352,"profit_mean_pct":-0.11598141379310352,"profit_sum
":-0.03363461000000002,"profit_sum_pct":-3.36,"profit_total_abs":0.00011156021803969656,"profit_total":0.011156021803969657,"profit_total_pct":1.12,"duration_avg":"5:35:00","wins":27,"draws":0,"losses":2},{"key":"TRX/BTC","trades":15,"profit_mean":0.0023467073333333323,"profit_mean_pct":0.23467073333333321,"profit_sum":0.035200609999999986,"profit_sum_pct":3.52,"profit_total_abs":0.00011056502909388873,"profit_total":0.011056502909388873,"profit_total_pct":1.11,"duration_avg":"2:28:00","wins":13,"draws":0,"losses":2},{"key":"DASH/BTC","trades":16,"profit_mean":0.0018703237499999997,"profit_mean_pct":0.18703237499999997,"profit_sum":0.029925179999999996,"profit_sum_pct":2.99,"profit_total_abs":0.0001102756892230564,"profit_total":0.01102756892230564,"profit_total_pct":1.1,"duration_avg":"3:03:00","wins":16,"draws":0,"losses":0},{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.00010025062656641558,"profit_total":0.010025062656641558,"profit_total_pct":1.0,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},{"key":"ZEC/BTC","trades":21,"profit_mean":-0.00039290904761904774,"profit_mean_pct":-0.03929090476190478,"profit_sum":-0.008251090000000003,"profit_sum_pct":-0.83,"profit_total_abs":9.697072101945111e-05,"profit_total":0.009697072101945111,"profit_total_pct":0.97,"duration_avg":"4:17:00","wins":20,"draws":0,"losses":1},{"key":"NXT/BTC","trades":12,"profit_mean":-0.0012261025000000006,"profit_mean_pct":-0.12261025000000006,"profit_sum":-0.014713230000000008,"profit_sum_pct":-1.47,"profit_total_abs":4.536340852130151e-05,"profit_total":0.004536340852130151,"profit_total_pct":0.45,"duration_avg":"0:57:00","wins":11,"draws":0,"losses":1},{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":3.533834586465928e-05,"profit_total":0.003533834586465928,"profit_total_pct":0.35,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"results_per_enter_tag":[{"key":"buy_tag","trades":1,"profit_mean":0.03990025,"profit_mean_pct":3.9900249999999997,"profit_sum":0.03990025,"profit_sum_pct":3.99,"profit_total_abs":4.5112781954887056e-05,"profit_total":0.004511278195488706,"profit_total_pct":0.45,"duration_avg":"0:15:00","wins":1,"draws":0,"losses":0},{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"exit_reason_summary":[{"exit_reason":"roi","trades":170,"wins":170,"draws":0,"losses":0,"profit_mean":0.005398268352941177,"profit_mean_pct":0.54,"profit_sum":0.91770562,"profit_sum_pct":91.77,"profit_total_abs":0.0017744360902255465,"profit_total":0.30590187333333335,"profit_total_pct":30.59},{"exit_reason":"stop_loss","trades":6,"wins":0,"draws":0,"losses":6,"profit_mean":-0.10448878000000002,"profit_mean_pct":-10.45,"profit_sum":-0.6269326800000001,"profit_sum_pct":-62.69,"profit_total_abs":-0.00060000000
00000003,"profit_total":-0.20897756000000003,"profit_total_pct":-20.9},{"exit_reason":"force_exit","trades":3,"wins":0,"draws":0,"losses":3,"profit_mean":-0.04894489333333333,"profit_mean_pct":-4.89,"profit_sum":-0.14683468,"profit_sum_pct":-14.68,"profit_total_abs":-0.00013253310452866177,"profit_total":-0.04894489333333333,"profit_total_pct":-4.89}],"left_open_trades":[{"key":"TRX/BTC","trades":1,"profit_mean":-0.0199116,"profit_mean_pct":-1.9911600000000003,"profit_sum":-0.0199116,"profit_sum_pct":-1.99,"profit_total_abs":-1.4998880680546292e-05,"profit_total":-0.0014998880680546292,"profit_total_pct":-0.15,"duration_avg":"5:15:00","wins":0,"draws":0,"losses":1},{"key":"ZEC/BTC","trades":1,"profit_mean":-0.04815133,"profit_mean_pct":-4.815133,"profit_sum":-0.04815133,"profit_sum_pct":-4.82,"profit_total_abs":-4.338015617352949e-05,"profit_total":-0.004338015617352949,"profit_total_pct":-0.43,"duration_avg":"2 days, 19:00:00","wins":0,"draws":0,"losses":1},{"key":"ADA/BTC","trades":1,"profit_mean":-0.07877175,"profit_mean_pct":-7.877175,"profit_sum":-0.07877175,"profit_sum_pct":-7.88,"profit_total_abs":-7.415406767458598e-05,"profit_total":-0.007415406767458598,"profit_total_pct":-0.74,"duration_avg":"3 days, 4:00:00","wins":0,"draws":0,"losses":1},{"key":"TOTAL","trades":3,"profit_mean":-0.04894489333333333,"profit_mean_pct":-4.894489333333333,"profit_sum":-0.14683468,"profit_sum_pct":-14.68,"profit_total_abs":-0.00013253310452866177,"profit_total":-0.013253310452866176,"profit_total_pct":-1.33,"duration_avg":"2 days, 1:25:00","wins":0,"draws":0,"losses":3}],"total_trades":179,"trade_count_long":179,"trade_count_short":0,"total_volume":0.17900000000000005,"avg_stake_amount":0.0010000000000000002,"profit_mean":0.0008041243575418989,"profit_median":0.0,"profit_total":0.10419029856968845,"profit_total_long":0.10419029856968845,"profit_total_short":0.0,"profit_total_abs":0.0010419029856968845,"profit_total_long_abs":0.0010419029856968845,"profit_total_short_abs":0.0,"cagr":5.712688499973264,"profit_factor":2.4223288739520954,"backtest_start":"2018-01-10 07:15:00","backtest_start_ts":1515568500000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":"2020-10-01 18:00:00+00:00","backtest_run_end_ts":"2020-10-01 
18:01:00+00:00","trades_per_day":9.42,"market_change":1.22,"pairlist":["TRX/BTC","ADA/BTC","XLM/BTC","ETH/BTC","XMR/BTC","ZEC/BTC","NXT/BTC","LTC/BTC","ETC/BTC","DASH/BTC"],"stake_amount":0.001,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":0.01,"dry_run_wallet":0.01,"final_balance":0.011041902985696884,"rejected_signals":0,"timedout_entry_orders":0,"timedout_exit_orders":0,"canceled_trade_entries":0,"canceled_entry_orders":0,"replaced_entry_orders":0,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timeframe_detail":"","timerange":"","enable_protections":false,"strategy_name":"StrategyTestV3","stoploss":0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":false,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.17955111999999998,"backtest_worst_day":-0.14683468,"backtest_best_day_abs":0.000245614,"backtest_worst_day_abs":-0.0001325331,"winning_days":19,"draw_days":0,"losing_days":2,"daily_profit":[["2018-01-10",0.000245614],["2018-01-11",0.0001055138],["2018-01-12",4.51128e-05],["2018-01-13",3.00752e-05],["2018-01-14",3.50877e-05],["2018-01-15",6.51629e-05],["2018-01-16",5.11278e-05],["2018-01-17",7.01754e-05],["2018-01-18",8.5213e-05],["2018-01-19",3.00752e-05],["2018-01-20",2.50627e-05],["2018-01-21",4.01003e-05],["2018-01-22",7.01754e-05],["2018-01-23",8.5213e-05],["2018-01-24",8.02005e-05],["2018-01-25",-4.48622e-05],["2018-01-26",4.01003e-05],["2018-01-27",4.01003e-05],["2018-01-28",3.50877e-05],["2018-01-29",4.01003e-05],["2018-01-30",-0.0001325331]],"wins":48,"losses":9,"draws":122,"holding_avg":"3:40:00","holding_avg_s":13200.0,"winner_holding_avg":"0:24:00","winner_holding_avg_s":1440.0,"loser_holding_avg":"1 day, 5:57:00","loser_holding_avg_s":107820.0,"max_drawdown":0.21142322000000008,"max_drawdown_account":0.018740312808228732,"max_relative_drawdown":0.018740312808228732,"max_drawdown_abs":0.0002000000000000001,"drawdown_start":"2018-01-16 19:30:00","drawdown_start_ts":1516131000000.0,"drawdown_end":"2018-01-16 22:25:00","drawdown_end_ts":1516141500000.0,"max_drawdown_low":0.0004721804511278108,"max_drawdown_high":0.0006721804511278109,"csum_min":0.010045112781954888,"csum_max":0.011069172932330812}},"strategy_comparison":[{"key":"StrategyTestV3","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9,"max_drawdown_account":0.018740312808228732,"max_drawdown_abs":"0.0002"}]} +{"metadata":{"StrategyTestV3":{"run_id":"asdf","backtest_start_time":"2020-10-01 18:00:00+00:00"}},"strategy":{"StrategyTestV3":{"trades":[{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":10.37344398340249,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 
07:20:00+00:00","open_rate":9.64e-05,"close_rate":0.00010074887218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":8.676e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.676e-05,"stop_loss_ratio":0.1,"min_rate":9.64e-05,"max_rate":0.00010074887218045112,"is_open":false,"open_timestamp":1515568500000.0,"close_timestamp":1515568800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":21.026072329688816,"open_date":"2018-01-10 07:15:00+00:00","close_date":"2018-01-10 07:30:00+00:00","open_rate":4.756e-05,"close_rate":4.9705563909774425e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":4.2804e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2804e-05,"stop_loss_ratio":0.1,"min_rate":4.756e-05,"max_rate":4.9705563909774425e-05,"is_open":false,"open_timestamp":1515568500000.0,"close_timestamp":1515569400000.0,"is_short":false,"leverage":1.0,"enter_tag":"buy_tag","orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":29.94908655286014,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:35:00+00:00","open_rate":3.339e-05,"close_rate":3.489631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":3.0050999999999997e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0050999999999997e-05,"stop_loss_ratio":0.1,"min_rate":3.339e-05,"max_rate":3.489631578947368e-05,"is_open":false,"open_timestamp":1515569100000.0,"close_timestamp":1515569700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":10.313531353135314,"open_date":"2018-01-10 07:25:00+00:00","close_date":"2018-01-10 07:40:00+00:00","open_rate":9.696e-05,"close_rate":0.00010133413533834584,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":15,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":8.7264e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.7264e-05,"stop_loss_ratio":0.1,"min_rate":9.696e-05,"max_rate":0.00010133413533834584,"is_open":false,"open_timestamp":1515569100000.0,"close_timestamp":1515570000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010604453870625663,"open_date":"2018-01-10 07:35:00+00:00","close_date":"2018-01-10 08:35:00+00:00","open_rate":0.0943,"close_rate":0.09477268170426063,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08487,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08487,"stop_loss_ratio":0.1,"min_rate":0.0943,"max_rate":0.09477268170426063,"is_open":false,"open_timestamp":1515569700000.0,"close_timestamp":1515573300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03677001860930642,"open_date":"2018-01-10 07:40:00+00:00","close_date":"2018-01-10 
08:10:00+00:00","open_rate":0.02719607,"close_rate":0.02760503345864661,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.024476463,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024476463,"stop_loss_ratio":0.1,"min_rate":0.02719607,"max_rate":0.02760503345864661,"is_open":false,"open_timestamp":1515570000000.0,"close_timestamp":1515571800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.021575196463739,"open_date":"2018-01-10 08:15:00+00:00","close_date":"2018-01-10 09:55:00+00:00","open_rate":0.04634952,"close_rate":0.046581848421052625,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":100,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.041714568,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041714568,"stop_loss_ratio":0.1,"min_rate":0.04634952,"max_rate":0.046581848421052625,"is_open":false,"open_timestamp":1515572100000.0,"close_timestamp":1515578100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":32.615786040443574,"open_date":"2018-01-10 14:45:00+00:00","close_date":"2018-01-10 15:50:00+00:00","open_rate":3.066e-05,"close_rate":3.081368421052631e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7594e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7594e-05,"stop_loss_ratio":0.1,"min_rate":3.066e-05,"max_rate":3.081368421052631e-05,"is_open":false,"open_timestamp":1515595500000.0,"close_timestamp":1515599400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.05917194776300452,"open_date":"2018-01-10 16:35:00+00:00","close_date":"2018-01-10 17:15:00+00:00","open_rate":0.0168999,"close_rate":0.016984611278195488,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.01520991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01520991,"stop_loss_ratio":0.1,"min_rate":0.0168999,"max_rate":0.016984611278195488,"is_open":false,"open_timestamp":1515602100000.0,"close_timestamp":1515604500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010949822656672253,"open_date":"2018-01-10 16:40:00+00:00","close_date":"2018-01-10 17:20:00+00:00","open_rate":0.09132568,"close_rate":0.0917834528320802,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08219311200000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08219311200000001,"stop_loss_ratio":0.1,"min_rate":0.09132568,"max_rate":0.0917834528320802,"is_open":false,"open_timestamp":1515602400000.0,"close_timestamp":1515604800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.011238476768326556,"open_date":"2018-01-10 18:50:00+00:00","close_date":"2018-01-10 
19:45:00+00:00","open_rate":0.08898003,"close_rate":0.08942604518796991,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.080082027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080082027,"stop_loss_ratio":0.1,"min_rate":0.08898003,"max_rate":0.08942604518796991,"is_open":false,"open_timestamp":1515610200000.0,"close_timestamp":1515613500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.011682232072680309,"open_date":"2018-01-10 22:15:00+00:00","close_date":"2018-01-10 23:00:00+00:00","open_rate":0.08560008,"close_rate":0.08602915308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.077040072,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077040072,"stop_loss_ratio":0.1,"min_rate":0.08560008,"max_rate":0.08602915308270676,"is_open":false,"open_timestamp":1515622500000.0,"close_timestamp":1515625200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.4014726015023105,"open_date":"2018-01-10 22:50:00+00:00","close_date":"2018-01-10 23:20:00+00:00","open_rate":0.00249083,"close_rate":0.0025282860902255634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002241747,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002241747,"stop_loss_ratio":0.1,"min_rate":0.00249083,"max_rate":0.0025282860902255634,"is_open":false,"open_timestamp":1515624600000.0,"close_timestamp":1515626400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":33.090668431502316,"open_date":"2018-01-10 23:15:00+00:00","close_date":"2018-01-11 00:15:00+00:00","open_rate":3.022e-05,"close_rate":3.037147869674185e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7198e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7198e-05,"stop_loss_ratio":0.1,"min_rate":3.022e-05,"max_rate":3.037147869674185e-05,"is_open":false,"open_timestamp":1515626100000.0,"close_timestamp":1515629700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.41034058268362744,"open_date":"2018-01-10 23:40:00+00:00","close_date":"2018-01-11 00:05:00+00:00","open_rate":0.002437,"close_rate":0.0024980776942355883,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.0021933,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0021933,"stop_loss_ratio":0.1,"min_rate":0.002437,"max_rate":0.0024980776942355883,"is_open":false,"open_timestamp":1515627600000.0,"close_timestamp":1515629100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.02095643931654345,"open_date":"2018-01-11 00:00:00+00:00","close_date":"2018-01-11 
00:35:00+00:00","open_rate":0.04771803,"close_rate":0.04843559436090225,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.042946227,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.042946227,"stop_loss_ratio":0.1,"min_rate":0.04771803,"max_rate":0.04843559436090225,"is_open":false,"open_timestamp":1515628800000.0,"close_timestamp":1515630900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":27.389756231169542,"open_date":"2018-01-11 03:40:00+00:00","close_date":"2018-01-11 04:25:00+00:00","open_rate":3.651e-05,"close_rate":3.2859000000000005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.10448878,"profit_abs":-9.999999999999994e-05,"exit_reason":"stop_loss","initial_stop_loss_abs":3.2859000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2859000000000005e-05,"stop_loss_ratio":0.1,"min_rate":3.2859000000000005e-05,"max_rate":3.651e-05,"is_open":false,"open_timestamp":1515642000000.0,"close_timestamp":1515644700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.011332594070446804,"open_date":"2018-01-11 03:55:00+00:00","close_date":"2018-01-11 04:25:00+00:00","open_rate":0.08824105,"close_rate":0.08956798308270676,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.079416945,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079416945,"stop_loss_ratio":0.1,"min_rate":0.08824105,"max_rate":0.08956798308270676,"is_open":false,"open_timestamp":1515642900000.0,"close_timestamp":1515644700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.411522633744856,"open_date":"2018-01-11 04:00:00+00:00","close_date":"2018-01-11 04:50:00+00:00","open_rate":0.00243,"close_rate":0.002442180451127819,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002187,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002187,"stop_loss_ratio":0.1,"min_rate":0.00243,"max_rate":0.002442180451127819,"is_open":false,"open_timestamp":1515643200000.0,"close_timestamp":1515646200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.022001890402423376,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 04:55:00+00:00","open_rate":0.04545064,"close_rate":0.046589753784461146,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.040905576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040905576,"stop_loss_ratio":0.1,"min_rate":0.04545064,"max_rate":0.046589753784461146,"is_open":false,"open_timestamp":1515645000000.0,"close_timestamp":1515646500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":29.655990510083036,"open_date":"2018-01-11 04:30:00+00:00","close_date":"2018-01-11 
04:50:00+00:00","open_rate":3.372e-05,"close_rate":3.456511278195488e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":3.0348e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.0348e-05,"stop_loss_ratio":0.1,"min_rate":3.372e-05,"max_rate":3.456511278195488e-05,"is_open":false,"open_timestamp":1515645000000.0,"close_timestamp":1515646200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.037821482602118005,"open_date":"2018-01-11 04:55:00+00:00","close_date":"2018-01-11 05:15:00+00:00","open_rate":0.02644,"close_rate":0.02710265664160401,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":0.023796,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023796,"stop_loss_ratio":0.1,"min_rate":0.02644,"max_rate":0.02710265664160401,"is_open":false,"open_timestamp":1515646500000.0,"close_timestamp":1515647700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.011348161597821153,"open_date":"2018-01-11 11:20:00+00:00","close_date":"2018-01-11 12:00:00+00:00","open_rate":0.08812,"close_rate":0.08856170426065162,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.079308,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079308,"stop_loss_ratio":0.1,"min_rate":0.08812,"max_rate":0.08856170426065162,"is_open":false,"open_timestamp":1515669600000.0,"close_timestamp":1515672000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.037263696923919086,"open_date":"2018-01-11 11:35:00+00:00","close_date":"2018-01-11 12:15:00+00:00","open_rate":0.02683577,"close_rate":0.026970285137844607,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.024152193,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024152193,"stop_loss_ratio":0.1,"min_rate":0.02683577,"max_rate":0.026970285137844607,"is_open":false,"open_timestamp":1515670500000.0,"close_timestamp":1515672900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":20.329335230737954,"open_date":"2018-01-11 14:00:00+00:00","close_date":"2018-01-11 14:25:00+00:00","open_rate":4.919e-05,"close_rate":5.04228320802005e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.4271e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4271e-05,"stop_loss_ratio":0.1,"min_rate":4.919e-05,"max_rate":5.04228320802005e-05,"is_open":false,"open_timestamp":1515679200000.0,"close_timestamp":1515680700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.01138317402960718,"open_date":"2018-01-11 19:25:00+00:00","close_date":"2018-01-11 
20:35:00+00:00","open_rate":0.08784896,"close_rate":0.08828930566416039,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":70,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.079064064,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.079064064,"stop_loss_ratio":0.1,"min_rate":0.08784896,"max_rate":0.08828930566416039,"is_open":false,"open_timestamp":1515698700000.0,"close_timestamp":1515702900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.58863858961802,"open_date":"2018-01-11 22:35:00+00:00","close_date":"2018-01-11 23:30:00+00:00","open_rate":5.105e-05,"close_rate":5.130588972431077e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.5945e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.5945e-05,"stop_loss_ratio":0.1,"min_rate":5.105e-05,"max_rate":5.130588972431077e-05,"is_open":false,"open_timestamp":1515710100000.0,"close_timestamp":1515713400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":25.252525252525253,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:25:00+00:00","open_rate":3.96e-05,"close_rate":4.019548872180451e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":3.5640000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5640000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.96e-05,"max_rate":4.019548872180451e-05,"is_open":false,"open_timestamp":1515711300000.0,"close_timestamp":1515713100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":34.66204506065858,"open_date":"2018-01-11 22:55:00+00:00","close_date":"2018-01-11 23:35:00+00:00","open_rate":2.885e-05,"close_rate":2.899461152882205e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.5965e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.5965e-05,"stop_loss_ratio":0.1,"min_rate":2.885e-05,"max_rate":2.899461152882205e-05,"is_open":false,"open_timestamp":1515711300000.0,"close_timestamp":1515713700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03780718336483932,"open_date":"2018-01-11 23:30:00+00:00","close_date":"2018-01-12 00:05:00+00:00","open_rate":0.02645,"close_rate":0.026847744360902256,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.023805000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.023805000000000003,"stop_loss_ratio":0.1,"min_rate":0.02645,"max_rate":0.026847744360902256,"is_open":false,"open_timestamp":1515713400000.0,"close_timestamp":1515715500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.020833333333333332,"open_date":"2018-01-11 23:55:00+00:00","close_date":"2018-01-12 
01:15:00+00:00","open_rate":0.048,"close_rate":0.04824060150375939,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0432,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0432,"stop_loss_ratio":0.1,"min_rate":0.048,"max_rate":0.04824060150375939,"is_open":false,"open_timestamp":1515714900000.0,"close_timestamp":1515719700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":21.31287297527707,"open_date":"2018-01-12 21:15:00+00:00","close_date":"2018-01-12 21:40:00+00:00","open_rate":4.692e-05,"close_rate":4.809593984962405e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.2228e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.2228e-05,"stop_loss_ratio":0.1,"min_rate":4.692e-05,"max_rate":4.809593984962405e-05,"is_open":false,"open_timestamp":1515791700000.0,"close_timestamp":1515793200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.38915654211062944,"open_date":"2018-01-13 00:55:00+00:00","close_date":"2018-01-13 06:20:00+00:00","open_rate":0.00256966,"close_rate":0.0025825405012531327,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":325,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002312694,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002312694,"stop_loss_ratio":0.1,"min_rate":0.00256966,"max_rate":0.0025825405012531327,"is_open":false,"open_timestamp":1515804900000.0,"close_timestamp":1515824400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":15.96933886937081,"open_date":"2018-01-13 10:55:00+00:00","close_date":"2018-01-13 11:35:00+00:00","open_rate":6.262e-05,"close_rate":6.293388471177944e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.6358e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.6358e-05,"stop_loss_ratio":0.1,"min_rate":6.262e-05,"max_rate":6.293388471177944e-05,"is_open":false,"open_timestamp":1515840900000.0,"close_timestamp":1515843300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":21.14164904862579,"open_date":"2018-01-13 13:05:00+00:00","close_date":"2018-01-15 14:10:00+00:00","open_rate":4.73e-05,"close_rate":4.753709273182957e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":2945,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.257e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.257e-05,"stop_loss_ratio":0.1,"min_rate":4.73e-05,"max_rate":4.753709273182957e-05,"is_open":false,"open_timestamp":1515848700000.0,"close_timestamp":1516025400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":16.49348507339601,"open_date":"2018-01-13 13:30:00+00:00","close_date":"2018-01-13 
14:45:00+00:00","open_rate":6.063e-05,"close_rate":6.0933909774436085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.4567e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4567e-05,"stop_loss_ratio":0.1,"min_rate":6.063e-05,"max_rate":6.0933909774436085e-05,"is_open":false,"open_timestamp":1515850200000.0,"close_timestamp":1515854700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":9.023641941887746,"open_date":"2018-01-13 13:40:00+00:00","close_date":"2018-01-13 23:30:00+00:00","open_rate":0.00011082,"close_rate":0.00011137548872180448,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":590,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":9.9738e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.9738e-05,"stop_loss_ratio":0.1,"min_rate":0.00011082,"max_rate":0.00011137548872180448,"is_open":false,"open_timestamp":1515850800000.0,"close_timestamp":1515886200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":16.863406408094438,"open_date":"2018-01-13 15:15:00+00:00","close_date":"2018-01-13 15:55:00+00:00","open_rate":5.93e-05,"close_rate":5.9597243107769415e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.337e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.337e-05,"stop_loss_ratio":0.1,"min_rate":5.93e-05,"max_rate":5.9597243107769415e-05,"is_open":false,"open_timestamp":1515856500000.0,"close_timestamp":1515858900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.020618543947292404,"open_date":"2018-01-13 16:30:00+00:00","close_date":"2018-01-13 17:10:00+00:00","open_rate":0.04850003,"close_rate":0.04874313791979949,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.043650027,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.043650027,"stop_loss_ratio":0.1,"min_rate":0.04850003,"max_rate":0.04874313791979949,"is_open":false,"open_timestamp":1515861000000.0,"close_timestamp":1515863400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010178097365511457,"open_date":"2018-01-13 22:05:00+00:00","close_date":"2018-01-14 06:25:00+00:00","open_rate":0.09825019,"close_rate":0.09874267215538848,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":500,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.088425171,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.088425171,"stop_loss_ratio":0.1,"min_rate":0.09825019,"max_rate":0.09874267215538848,"is_open":false,"open_timestamp":1515881100000.0,"close_timestamp":1515911100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":16.616816218012627,"open_date":"2018-01-14 00:20:00+00:00","close_date":"2018-01-14 
22:55:00+00:00","open_rate":6.018e-05,"close_rate":6.048165413533834e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":1355,"profit_ratio":0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.4162e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.4162e-05,"stop_loss_ratio":0.1,"min_rate":6.018e-05,"max_rate":6.048165413533834e-05,"is_open":false,"open_timestamp":1515889200000.0,"close_timestamp":1515970500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010246952581919518,"open_date":"2018-01-14 12:45:00+00:00","close_date":"2018-01-14 13:25:00+00:00","open_rate":0.09758999,"close_rate":0.0980791628822055,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.087830991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.087830991,"stop_loss_ratio":0.1,"min_rate":0.09758999,"max_rate":0.0980791628822055,"is_open":false,"open_timestamp":1515933900000.0,"close_timestamp":1515936300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.3215434083601286,"open_date":"2018-01-14 15:30:00+00:00","close_date":"2018-01-14 16:00:00+00:00","open_rate":0.00311,"close_rate":0.0031567669172932328,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002799,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002799,"stop_loss_ratio":0.1,"min_rate":0.00311,"max_rate":0.0031567669172932328,"is_open":false,"open_timestamp":1515943800000.0,"close_timestamp":1515945600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.32010140812609433,"open_date":"2018-01-14 20:45:00+00:00","close_date":"2018-01-14 22:15:00+00:00","open_rate":0.00312401,"close_rate":0.003139669197994987,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002811609,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002811609,"stop_loss_ratio":0.1,"min_rate":0.00312401,"max_rate":0.003139669197994987,"is_open":false,"open_timestamp":1515962700000.0,"close_timestamp":1515968100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.057247866085791646,"open_date":"2018-01-14 23:35:00+00:00","close_date":"2018-01-15 00:30:00+00:00","open_rate":0.0174679,"close_rate":0.017555458395989976,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.015721110000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.015721110000000003,"stop_loss_ratio":0.1,"min_rate":0.0174679,"max_rate":0.017555458395989976,"is_open":false,"open_timestamp":1515972900000.0,"close_timestamp":1515976200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.013611282991367995,"open_date":"2018-01-14 23:45:00+00:00","close_date":"2018-01-15 
00:25:00+00:00","open_rate":0.07346846,"close_rate":0.07383672295739348,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.066121614,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.066121614,"stop_loss_ratio":0.1,"min_rate":0.07346846,"max_rate":0.07383672295739348,"is_open":false,"open_timestamp":1515973500000.0,"close_timestamp":1515975900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010204706410596568,"open_date":"2018-01-15 02:25:00+00:00","close_date":"2018-01-15 03:05:00+00:00","open_rate":0.097994,"close_rate":0.09848519799498744,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0881946,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0881946,"stop_loss_ratio":0.1,"min_rate":0.097994,"max_rate":0.09848519799498744,"is_open":false,"open_timestamp":1515983100000.0,"close_timestamp":1515985500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010353038616834042,"open_date":"2018-01-15 07:20:00+00:00","close_date":"2018-01-15 08:00:00+00:00","open_rate":0.09659,"close_rate":0.09707416040100247,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.086931,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.086931,"stop_loss_ratio":0.1,"min_rate":0.09659,"max_rate":0.09707416040100247,"is_open":false,"open_timestamp":1516000800000.0,"close_timestamp":1516003200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":10.0130169219986,"open_date":"2018-01-15 08:20:00+00:00","close_date":"2018-01-15 08:55:00+00:00","open_rate":9.987e-05,"close_rate":0.00010137180451127818,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":8.9883e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.9883e-05,"stop_loss_ratio":0.1,"min_rate":9.987e-05,"max_rate":0.00010137180451127818,"is_open":false,"open_timestamp":1516004400000.0,"close_timestamp":1516006500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010537752023511832,"open_date":"2018-01-15 12:10:00+00:00","close_date":"2018-01-16 02:50:00+00:00","open_rate":0.0948969,"close_rate":0.09537257368421052,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":880,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08540721000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08540721000000001,"stop_loss_ratio":0.1,"min_rate":0.0948969,"max_rate":0.09537257368421052,"is_open":false,"open_timestamp":1516018200000.0,"close_timestamp":1516071000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014084507042253523,"open_date":"2018-01-15 14:10:00+00:00","close_date":"2018-01-15 
17:40:00+00:00","open_rate":0.071,"close_rate":0.07135588972431077,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":210,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0639,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0639,"stop_loss_ratio":0.1,"min_rate":0.071,"max_rate":0.07135588972431077,"is_open":false,"open_timestamp":1516025400000.0,"close_timestamp":1516038000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.021736763017766975,"open_date":"2018-01-15 14:30:00+00:00","close_date":"2018-01-15 15:10:00+00:00","open_rate":0.04600501,"close_rate":0.046235611553884705,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.041404509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.041404509,"stop_loss_ratio":0.1,"min_rate":0.04600501,"max_rate":0.046235611553884705,"is_open":false,"open_timestamp":1516026600000.0,"close_timestamp":1516029000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":10.595465140919686,"open_date":"2018-01-15 18:10:00+00:00","close_date":"2018-01-15 19:25:00+00:00","open_rate":9.438e-05,"close_rate":9.485308270676693e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":75,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":8.4942e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.4942e-05,"stop_loss_ratio":0.1,"min_rate":9.438e-05,"max_rate":9.485308270676693e-05,"is_open":false,"open_timestamp":1516039800000.0,"close_timestamp":1516044300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.032894726021471705,"open_date":"2018-01-15 18:35:00+00:00","close_date":"2018-01-15 19:15:00+00:00","open_rate":0.03040001,"close_rate":0.030552391002506264,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.027360009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027360009,"stop_loss_ratio":0.1,"min_rate":0.03040001,"max_rate":0.030552391002506264,"is_open":false,"open_timestamp":1516041300000.0,"close_timestamp":1516043700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":17.13208840157615,"open_date":"2018-01-15 20:25:00+00:00","close_date":"2018-01-16 08:25:00+00:00","open_rate":5.837e-05,"close_rate":5.2533e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":720,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":5.2533e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2533e-05,"stop_loss_ratio":0.1,"min_rate":5.2533e-05,"max_rate":5.837e-05,"is_open":false,"open_timestamp":1516047900000.0,"close_timestamp":1516091100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.021722130506560085,"open_date":"2018-01-15 20:40:00+00:00","close_date":"2018-01-15 
22:00:00+00:00","open_rate":0.046036,"close_rate":0.04626675689223057,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0414324,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0414324,"stop_loss_ratio":0.1,"min_rate":0.046036,"max_rate":0.04626675689223057,"is_open":false,"open_timestamp":1516048800000.0,"close_timestamp":1516053600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.34861425832316545,"open_date":"2018-01-16 00:30:00+00:00","close_date":"2018-01-16 01:10:00+00:00","open_rate":0.0028685,"close_rate":0.0028828784461152877,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.00258165,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00258165,"stop_loss_ratio":0.1,"min_rate":0.0028685,"max_rate":0.0028828784461152877,"is_open":false,"open_timestamp":1516062600000.0,"close_timestamp":1516065000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014854967241083492,"open_date":"2018-01-16 01:15:00+00:00","close_date":"2018-01-16 02:35:00+00:00","open_rate":0.06731755,"close_rate":0.0676549813283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060585795000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060585795000000005,"stop_loss_ratio":0.1,"min_rate":0.06731755,"max_rate":0.0676549813283208,"is_open":false,"open_timestamp":1516065300000.0,"close_timestamp":1516070100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010848794492804754,"open_date":"2018-01-16 07:45:00+00:00","close_date":"2018-01-16 08:40:00+00:00","open_rate":0.09217614,"close_rate":0.09263817578947368,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.082958526,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082958526,"stop_loss_ratio":0.1,"min_rate":0.09217614,"max_rate":0.09263817578947368,"is_open":false,"open_timestamp":1516088700000.0,"close_timestamp":1516092000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.06060606060606061,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 08:55:00+00:00","open_rate":0.0165,"close_rate":0.016913533834586467,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":0.01485,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01485,"stop_loss_ratio":0.1,"min_rate":0.0165,"max_rate":0.016913533834586467,"is_open":false,"open_timestamp":1516091700000.0,"close_timestamp":1516092900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":12.57387149503332,"open_date":"2018-01-16 08:35:00+00:00","close_date":"2018-01-16 
08:40:00+00:00","open_rate":7.953e-05,"close_rate":8.311781954887218e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":7.157700000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.157700000000001e-05,"stop_loss_ratio":0.1,"min_rate":7.953e-05,"max_rate":8.311781954887218e-05,"is_open":false,"open_timestamp":1516091700000.0,"close_timestamp":1516092000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.022122914915269236,"open_date":"2018-01-16 08:45:00+00:00","close_date":"2018-01-16 09:50:00+00:00","open_rate":0.045202,"close_rate":0.04542857644110275,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0406818,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0406818,"stop_loss_ratio":0.1,"min_rate":0.045202,"max_rate":0.04542857644110275,"is_open":false,"open_timestamp":1516092300000.0,"close_timestamp":1516096200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.054878048780488,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:45:00+00:00","open_rate":5.248e-05,"close_rate":5.326917293233082e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":4.7232e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7232e-05,"stop_loss_ratio":0.1,"min_rate":5.248e-05,"max_rate":5.326917293233082e-05,"is_open":false,"open_timestamp":1516094100000.0,"close_timestamp":1516095900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03457434486802627,"open_date":"2018-01-16 09:15:00+00:00","close_date":"2018-01-16 09:55:00+00:00","open_rate":0.02892318,"close_rate":0.02906815834586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.026030862,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.026030862,"stop_loss_ratio":0.1,"min_rate":0.02892318,"max_rate":0.02906815834586466,"is_open":false,"open_timestamp":1516094100000.0,"close_timestamp":1516096500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.38735944164405,"open_date":"2018-01-16 09:50:00+00:00","close_date":"2018-01-16 10:10:00+00:00","open_rate":5.158e-05,"close_rate":5.287273182957392e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":4.6422e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6422e-05,"stop_loss_ratio":0.1,"min_rate":5.158e-05,"max_rate":5.287273182957392e-05,"is_open":false,"open_timestamp":1516096200000.0,"close_timestamp":1516097400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.035357778286929785,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 
10:35:00+00:00","open_rate":0.02828232,"close_rate":0.02870761804511278,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.025454088,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025454088,"stop_loss_ratio":0.1,"min_rate":0.02828232,"max_rate":0.02870761804511278,"is_open":false,"open_timestamp":1516097100000.0,"close_timestamp":1516098900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.022948496230938985,"open_date":"2018-01-16 10:05:00+00:00","close_date":"2018-01-16 10:40:00+00:00","open_rate":0.04357584,"close_rate":0.044231115789473675,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.039218256,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039218256,"stop_loss_ratio":0.1,"min_rate":0.04357584,"max_rate":0.044231115789473675,"is_open":false,"open_timestamp":1516097100000.0,"close_timestamp":1516099200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.64975755315181,"open_date":"2018-01-16 13:45:00+00:00","close_date":"2018-01-16 14:20:00+00:00","open_rate":5.362e-05,"close_rate":5.442631578947368e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.8258e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8258e-05,"stop_loss_ratio":0.1,"min_rate":5.362e-05,"max_rate":5.442631578947368e-05,"is_open":false,"open_timestamp":1516110300000.0,"close_timestamp":1516112400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.86080724254998,"open_date":"2018-01-16 17:30:00+00:00","close_date":"2018-01-16 18:25:00+00:00","open_rate":5.302e-05,"close_rate":5.328576441102756e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7718e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7718e-05,"stop_loss_ratio":0.1,"min_rate":5.302e-05,"max_rate":5.328576441102756e-05,"is_open":false,"open_timestamp":1516123800000.0,"close_timestamp":1516127100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010952903718828448,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 18:45:00+00:00","open_rate":0.09129999,"close_rate":0.09267292218045112,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.082169991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.082169991,"stop_loss_ratio":0.1,"min_rate":0.09129999,"max_rate":0.09267292218045112,"is_open":false,"open_timestamp":1516126500000.0,"close_timestamp":1516128300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":26.26050420168067,"open_date":"2018-01-16 18:15:00+00:00","close_date":"2018-01-16 
18:35:00+00:00","open_rate":3.808e-05,"close_rate":3.903438596491228e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":3.4272e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.4272e-05,"stop_loss_ratio":0.1,"min_rate":3.808e-05,"max_rate":3.903438596491228e-05,"is_open":false,"open_timestamp":1516126500000.0,"close_timestamp":1516127700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.035574376772493324,"open_date":"2018-01-16 19:00:00+00:00","close_date":"2018-01-16 19:30:00+00:00","open_rate":0.02811012,"close_rate":0.028532828571428567,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.025299108,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025299108,"stop_loss_ratio":0.1,"min_rate":0.02811012,"max_rate":0.028532828571428567,"is_open":false,"open_timestamp":1516129200000.0,"close_timestamp":1516131000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.387028357567759,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":0.00258379,"close_rate":0.002325411,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":0.002325411,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002325411,"stop_loss_ratio":0.1,"min_rate":0.002325411,"max_rate":0.00258379,"is_open":false,"open_timestamp":1516137900000.0,"close_timestamp":1516141500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":39.07776475185619,"open_date":"2018-01-16 21:25:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":2.559e-05,"close_rate":2.3031e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":2.3031e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.3031e-05,"stop_loss_ratio":0.1,"min_rate":2.3031e-05,"max_rate":2.559e-05,"is_open":false,"open_timestamp":1516137900000.0,"close_timestamp":1516142700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":13.123359580052494,"open_date":"2018-01-16 21:35:00+00:00","close_date":"2018-01-16 22:25:00+00:00","open_rate":7.62e-05,"close_rate":6.858e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000005,"exit_reason":"stop_loss","initial_stop_loss_abs":6.858e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.858e-05,"stop_loss_ratio":0.1,"min_rate":6.858e-05,"max_rate":7.62e-05,"is_open":false,"open_timestamp":1516138500000.0,"close_timestamp":1516141500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.4350777048780912,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 
22:35:00+00:00","open_rate":0.00229844,"close_rate":0.002402129022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488727e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002068596,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002068596,"stop_loss_ratio":0.1,"min_rate":0.00229844,"max_rate":0.002402129022556391,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.06622516556291391,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 22:40:00+00:00","open_rate":0.0151,"close_rate":0.015781203007518795,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":0.01359,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.01359,"stop_loss_ratio":0.1,"min_rate":0.0151,"max_rate":0.015781203007518795,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.4243113426908128,"open_date":"2018-01-16 22:40:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":0.00235676,"close_rate":0.00246308,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488727e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002121084,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002121084,"stop_loss_ratio":0.1,"min_rate":0.00235676,"max_rate":0.00246308,"is_open":false,"open_timestamp":1516142400000.0,"close_timestamp":1516142700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.01585559988076589,"open_date":"2018-01-16 22:45:00+00:00","close_date":"2018-01-16 23:05:00+00:00","open_rate":0.0630692,"close_rate":0.06464988170426066,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":0.056762280000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.056762280000000005,"stop_loss_ratio":0.1,"min_rate":0.0630692,"max_rate":0.06464988170426066,"is_open":false,"open_timestamp":1516142700000.0,"close_timestamp":1516143900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":45.45454545454545,"open_date":"2018-01-16 22:50:00+00:00","close_date":"2018-01-16 22:55:00+00:00","open_rate":2.2e-05,"close_rate":2.299248120300751e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":5,"profit_ratio":0.03990025,"profit_abs":4.511278195488684e-05,"exit_reason":"roi","initial_stop_loss_abs":1.98e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":1.98e-05,"stop_loss_ratio":0.1,"min_rate":2.2e-05,"max_rate":2.299248120300751e-05,"is_open":false,"open_timestamp":1516143000000.0,"close_timestamp":1516143300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":20.10454362685967,"open_date":"2018-01-17 03:30:00+00:00","close_date":"2018-01-17 
04:00:00+00:00","open_rate":4.974e-05,"close_rate":5.048796992481203e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.4766000000000005e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4766000000000005e-05,"stop_loss_ratio":0.1,"min_rate":4.974e-05,"max_rate":5.048796992481203e-05,"is_open":false,"open_timestamp":1516159800000.0,"close_timestamp":1516161600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":14.068655036578503,"open_date":"2018-01-17 03:55:00+00:00","close_date":"2018-01-17 04:15:00+00:00","open_rate":7.108e-05,"close_rate":7.28614536340852e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641603896e-05,"exit_reason":"roi","initial_stop_loss_abs":6.3972e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":6.3972e-05,"stop_loss_ratio":0.1,"min_rate":7.108e-05,"max_rate":7.28614536340852e-05,"is_open":false,"open_timestamp":1516161300000.0,"close_timestamp":1516162500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.0231107002542177,"open_date":"2018-01-17 09:35:00+00:00","close_date":"2018-01-17 10:15:00+00:00","open_rate":0.04327,"close_rate":0.04348689223057644,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.038943000000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.038943000000000005,"stop_loss_ratio":0.1,"min_rate":0.04327,"max_rate":0.04348689223057644,"is_open":false,"open_timestamp":1516181700000.0,"close_timestamp":1516184100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":20.012007204322593,"open_date":"2018-01-17 10:20:00+00:00","close_date":"2018-01-17 17:00:00+00:00","open_rate":4.997e-05,"close_rate":5.022047619047618e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":400,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.4973e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4973e-05,"stop_loss_ratio":0.1,"min_rate":4.997e-05,"max_rate":5.022047619047618e-05,"is_open":false,"open_timestamp":1516184400000.0,"close_timestamp":1516208400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014626687444363738,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 11:25:00+00:00","open_rate":0.06836818,"close_rate":0.06871087764411027,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.061531362,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061531362,"stop_loss_ratio":0.1,"min_rate":0.06836818,"max_rate":0.06871087764411027,"is_open":false,"open_timestamp":1516185000000.0,"close_timestamp":1516188300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":27.548209366391184,"open_date":"2018-01-17 10:30:00+00:00","close_date":"2018-01-17 
11:10:00+00:00","open_rate":3.63e-05,"close_rate":3.648195488721804e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.2670000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.2670000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.63e-05,"max_rate":3.648195488721804e-05,"is_open":false,"open_timestamp":1516185000000.0,"close_timestamp":1516187400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03558718861209965,"open_date":"2018-01-17 12:30:00+00:00","close_date":"2018-01-17 22:05:00+00:00","open_rate":0.0281,"close_rate":0.02824085213032581,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":575,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.02529,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.02529,"stop_loss_ratio":0.1,"min_rate":0.0281,"max_rate":0.02824085213032581,"is_open":false,"open_timestamp":1516192200000.0,"close_timestamp":1516226700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.011559355963546878,"open_date":"2018-01-17 12:35:00+00:00","close_date":"2018-01-17 16:55:00+00:00","open_rate":0.08651001,"close_rate":0.08694364413533832,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.077859009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.077859009,"stop_loss_ratio":0.1,"min_rate":0.08651001,"max_rate":0.08694364413533832,"is_open":false,"open_timestamp":1516192500000.0,"close_timestamp":1516208100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":17.752529735487308,"open_date":"2018-01-18 05:00:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":5.633e-05,"close_rate":5.6612355889724306e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0697e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0697e-05,"stop_loss_ratio":0.1,"min_rate":5.633e-05,"max_rate":5.6612355889724306e-05,"is_open":false,"open_timestamp":1516251600000.0,"close_timestamp":1516254900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.01430923457900944,"open_date":"2018-01-18 05:20:00+00:00","close_date":"2018-01-18 05:55:00+00:00","open_rate":0.06988494,"close_rate":0.07093584135338346,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":35,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.06289644600000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06289644600000001,"stop_loss_ratio":0.1,"min_rate":0.06988494,"max_rate":0.07093584135338346,"is_open":false,"open_timestamp":1516252800000.0,"close_timestamp":1516254900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.034265103697024,"open_date":"2018-01-18 07:35:00+00:00","close_date":"2018-01-18 
08:15:00+00:00","open_rate":5.545e-05,"close_rate":5.572794486215538e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.9905e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.9905e-05,"stop_loss_ratio":0.1,"min_rate":5.545e-05,"max_rate":5.572794486215538e-05,"is_open":false,"open_timestamp":1516260900000.0,"close_timestamp":1516263300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.06121723118136401,"open_date":"2018-01-18 09:00:00+00:00","close_date":"2018-01-18 09:40:00+00:00","open_rate":0.01633527,"close_rate":0.016417151052631574,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014701743,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014701743,"stop_loss_ratio":0.1,"min_rate":0.01633527,"max_rate":0.016417151052631574,"is_open":false,"open_timestamp":1516266000000.0,"close_timestamp":1516268400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.3707356136045141,"open_date":"2018-01-18 16:40:00+00:00","close_date":"2018-01-18 17:20:00+00:00","open_rate":0.00269734,"close_rate":0.002710860501253133,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002427606,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002427606,"stop_loss_ratio":0.1,"min_rate":0.00269734,"max_rate":0.002710860501253133,"is_open":false,"open_timestamp":1516293600000.0,"close_timestamp":1516296000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":22.3463687150838,"open_date":"2018-01-18 18:05:00+00:00","close_date":"2018-01-18 18:30:00+00:00","open_rate":4.475e-05,"close_rate":4.587155388471177e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":4.0275e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.0275e-05,"stop_loss_ratio":0.1,"min_rate":4.475e-05,"max_rate":4.587155388471177e-05,"is_open":false,"open_timestamp":1516298700000.0,"close_timestamp":1516300200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":35.842293906810035,"open_date":"2018-01-18 18:25:00+00:00","close_date":"2018-01-18 18:55:00+00:00","open_rate":2.79e-05,"close_rate":2.8319548872180444e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":2.511e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.511e-05,"stop_loss_ratio":0.1,"min_rate":2.79e-05,"max_rate":2.8319548872180444e-05,"is_open":false,"open_timestamp":1516299900000.0,"close_timestamp":1516301700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.022525942001105578,"open_date":"2018-01-18 20:10:00+00:00","close_date":"2018-01-18 
20:50:00+00:00","open_rate":0.04439326,"close_rate":0.04461578260651629,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.039953934,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039953934,"stop_loss_ratio":0.1,"min_rate":0.04439326,"max_rate":0.04461578260651629,"is_open":false,"open_timestamp":1516306200000.0,"close_timestamp":1516308600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":22.271714922048996,"open_date":"2018-01-18 21:30:00+00:00","close_date":"2018-01-19 00:35:00+00:00","open_rate":4.49e-05,"close_rate":4.51250626566416e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":185,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.041e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.041e-05,"stop_loss_ratio":0.1,"min_rate":4.49e-05,"max_rate":4.51250626566416e-05,"is_open":false,"open_timestamp":1516311000000.0,"close_timestamp":1516322100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03502626970227671,"open_date":"2018-01-18 21:55:00+00:00","close_date":"2018-01-19 05:05:00+00:00","open_rate":0.02855,"close_rate":0.028693107769423555,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":430,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025695,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025695,"stop_loss_ratio":0.1,"min_rate":0.02855,"max_rate":0.028693107769423555,"is_open":false,"open_timestamp":1516312500000.0,"close_timestamp":1516338300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":17.25327812284334,"open_date":"2018-01-18 22:10:00+00:00","close_date":"2018-01-18 22:50:00+00:00","open_rate":5.796e-05,"close_rate":5.8250526315789473e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.2164e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.2164e-05,"stop_loss_ratio":0.1,"min_rate":5.796e-05,"max_rate":5.8250526315789473e-05,"is_open":false,"open_timestamp":1516313400000.0,"close_timestamp":1516315800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.02303975994413319,"open_date":"2018-01-18 23:50:00+00:00","close_date":"2018-01-19 00:30:00+00:00","open_rate":0.04340323,"close_rate":0.04362079005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.039062907,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.039062907,"stop_loss_ratio":0.1,"min_rate":0.04340323,"max_rate":0.04362079005012531,"is_open":false,"open_timestamp":1516319400000.0,"close_timestamp":1516321800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.02244943545282195,"open_date":"2018-01-19 16:45:00+00:00","close_date":"2018-01-19 
17:35:00+00:00","open_rate":0.04454455,"close_rate":0.04476783095238095,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.040090095000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.040090095000000006,"stop_loss_ratio":0.1,"min_rate":0.04454455,"max_rate":0.04476783095238095,"is_open":false,"open_timestamp":1516380300000.0,"close_timestamp":1516383300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":17.793594306049823,"open_date":"2018-01-19 17:15:00+00:00","close_date":"2018-01-19 19:55:00+00:00","open_rate":5.62e-05,"close_rate":5.648170426065162e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":160,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.058e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.058e-05,"stop_loss_ratio":0.1,"min_rate":5.62e-05,"max_rate":5.648170426065162e-05,"is_open":false,"open_timestamp":1516382100000.0,"close_timestamp":1516391700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":23.04678497349619,"open_date":"2018-01-19 17:20:00+00:00","close_date":"2018-01-19 20:15:00+00:00","open_rate":4.339e-05,"close_rate":4.360749373433584e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.9051e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.9051e-05,"stop_loss_ratio":0.1,"min_rate":4.339e-05,"max_rate":4.360749373433584e-05,"is_open":false,"open_timestamp":1516382400000.0,"close_timestamp":1516392900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":9.910802775024775,"open_date":"2018-01-20 04:45:00+00:00","close_date":"2018-01-20 17:35:00+00:00","open_rate":0.0001009,"close_rate":0.00010140576441102755,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":770,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":9.081e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":9.081e-05,"stop_loss_ratio":0.1,"min_rate":0.0001009,"max_rate":0.00010140576441102755,"is_open":false,"open_timestamp":1516423500000.0,"close_timestamp":1516469700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.3696789338459548,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 15:15:00+00:00","open_rate":0.00270505,"close_rate":0.002718609147869674,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":625,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002434545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002434545,"stop_loss_ratio":0.1,"min_rate":0.00270505,"max_rate":0.002718609147869674,"is_open":false,"open_timestamp":1516423800000.0,"close_timestamp":1516461300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.033333311111125925,"open_date":"2018-01-20 04:50:00+00:00","close_date":"2018-01-20 
07:00:00+00:00","open_rate":0.03000002,"close_rate":0.030150396040100245,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.027000018,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027000018,"stop_loss_ratio":0.1,"min_rate":0.03000002,"max_rate":0.030150396040100245,"is_open":false,"open_timestamp":1516423800000.0,"close_timestamp":1516431600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.315018315018317,"open_date":"2018-01-20 09:00:00+00:00","close_date":"2018-01-20 09:40:00+00:00","open_rate":5.46e-05,"close_rate":5.4873684210526304e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.914e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.914e-05,"stop_loss_ratio":0.1,"min_rate":5.46e-05,"max_rate":5.4873684210526304e-05,"is_open":false,"open_timestamp":1516438800000.0,"close_timestamp":1516441200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03244412634781012,"open_date":"2018-01-20 18:25:00+00:00","close_date":"2018-01-25 03:50:00+00:00","open_rate":0.03082222,"close_rate":0.027739998,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":6325,"profit_ratio":-0.10448878,"profit_abs":-0.00010000000000000015,"exit_reason":"stop_loss","initial_stop_loss_abs":0.027739998,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.027739998,"stop_loss_ratio":0.1,"min_rate":0.027739998,"max_rate":0.03082222,"is_open":false,"open_timestamp":1516472700000.0,"close_timestamp":1516852200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.011148273260677063,"open_date":"2018-01-20 22:25:00+00:00","close_date":"2018-01-20 23:15:00+00:00","open_rate":0.08969999,"close_rate":0.09014961401002504,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.080729991,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.080729991,"stop_loss_ratio":0.1,"min_rate":0.08969999,"max_rate":0.09014961401002504,"is_open":false,"open_timestamp":1516487100000.0,"close_timestamp":1516490100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.06125570520324337,"open_date":"2018-01-21 02:50:00+00:00","close_date":"2018-01-21 14:30:00+00:00","open_rate":0.01632501,"close_rate":0.01640683962406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":700,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014692509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014692509,"stop_loss_ratio":0.1,"min_rate":0.01632501,"max_rate":0.01640683962406015,"is_open":false,"open_timestamp":1516503000000.0,"close_timestamp":1516545000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.01417675579120474,"open_date":"2018-01-21 10:20:00+00:00","close_date":"2018-01-21 
11:00:00+00:00","open_rate":0.070538,"close_rate":0.07089157393483708,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0634842,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0634842,"stop_loss_ratio":0.1,"min_rate":0.070538,"max_rate":0.07089157393483708,"is_open":false,"open_timestamp":1516530000000.0,"close_timestamp":1516532400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.864365214110546,"open_date":"2018-01-21 15:50:00+00:00","close_date":"2018-01-21 18:45:00+00:00","open_rate":5.301e-05,"close_rate":5.327571428571427e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":175,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7709e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7709e-05,"stop_loss_ratio":0.1,"min_rate":5.301e-05,"max_rate":5.327571428571427e-05,"is_open":false,"open_timestamp":1516549800000.0,"close_timestamp":1516560300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":25.284450063211125,"open_date":"2018-01-21 16:20:00+00:00","close_date":"2018-01-21 17:00:00+00:00","open_rate":3.955e-05,"close_rate":3.9748245614035085e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.5595e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5595e-05,"stop_loss_ratio":0.1,"min_rate":3.955e-05,"max_rate":3.9748245614035085e-05,"is_open":false,"open_timestamp":1516551600000.0,"close_timestamp":1516554000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.38683971296493297,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:45:00+00:00","open_rate":0.00258505,"close_rate":0.002623922932330827,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002326545,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326545,"stop_loss_ratio":0.1,"min_rate":0.00258505,"max_rate":0.002623922932330827,"is_open":false,"open_timestamp":1516569300000.0,"close_timestamp":1516571100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":25.621316935690498,"open_date":"2018-01-21 21:15:00+00:00","close_date":"2018-01-21 21:55:00+00:00","open_rate":3.903e-05,"close_rate":3.922563909774435e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":3.5127e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.5127e-05,"stop_loss_ratio":0.1,"min_rate":3.903e-05,"max_rate":3.922563909774435e-05,"is_open":false,"open_timestamp":1516569300000.0,"close_timestamp":1516571700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.098548510313215,"open_date":"2018-01-22 00:35:00+00:00","close_date":"2018-01-22 
10:35:00+00:00","open_rate":5.236e-05,"close_rate":5.262245614035087e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":600,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":4.7124e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7124e-05,"stop_loss_ratio":0.1,"min_rate":5.236e-05,"max_rate":5.262245614035087e-05,"is_open":false,"open_timestamp":1516581300000.0,"close_timestamp":1516617300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":11.076650420912715,"open_date":"2018-01-22 01:30:00+00:00","close_date":"2018-01-22 02:10:00+00:00","open_rate":9.028e-05,"close_rate":9.07325313283208e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":8.1252e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.1252e-05,"stop_loss_ratio":0.1,"min_rate":9.028e-05,"max_rate":9.07325313283208e-05,"is_open":false,"open_timestamp":1516584600000.0,"close_timestamp":1516587000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.3721622627465575,"open_date":"2018-01-22 12:25:00+00:00","close_date":"2018-01-22 14:35:00+00:00","open_rate":0.002687,"close_rate":0.002700468671679198,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":130,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0024183000000000004,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0024183000000000004,"stop_loss_ratio":0.1,"min_rate":0.002687,"max_rate":0.002700468671679198,"is_open":false,"open_timestamp":1516623900000.0,"close_timestamp":1516631700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":23.99232245681382,"open_date":"2018-01-22 13:15:00+00:00","close_date":"2018-01-22 13:55:00+00:00","open_rate":4.168e-05,"close_rate":4.188892230576441e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":3.7512e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.7512e-05,"stop_loss_ratio":0.1,"min_rate":4.168e-05,"max_rate":4.188892230576441e-05,"is_open":false,"open_timestamp":1516626900000.0,"close_timestamp":1516629300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":11.336583153837434,"open_date":"2018-01-22 14:00:00+00:00","close_date":"2018-01-22 14:30:00+00:00","open_rate":8.821e-05,"close_rate":8.953646616541353e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":7.9389e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.9389e-05,"stop_loss_ratio":0.1,"min_rate":8.821e-05,"max_rate":8.953646616541353e-05,"is_open":false,"open_timestamp":1516629600000.0,"close_timestamp":1516631400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.334880123743233,"open_date":"2018-01-22 15:55:00+00:00","close_date":"2018-01-22 
16:40:00+00:00","open_rate":5.172e-05,"close_rate":5.1979248120300745e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6548e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6548e-05,"stop_loss_ratio":0.1,"min_rate":5.172e-05,"max_rate":5.1979248120300745e-05,"is_open":false,"open_timestamp":1516636500000.0,"close_timestamp":1516639200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":33.04692663582287,"open_date":"2018-01-22 16:05:00+00:00","close_date":"2018-01-22 16:25:00+00:00","open_rate":3.026e-05,"close_rate":3.101839598997494e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":20,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":2.7234e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7234e-05,"stop_loss_ratio":0.1,"min_rate":3.026e-05,"max_rate":3.101839598997494e-05,"is_open":false,"open_timestamp":1516637100000.0,"close_timestamp":1516638300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014156285390713478,"open_date":"2018-01-22 19:50:00+00:00","close_date":"2018-01-23 00:10:00+00:00","open_rate":0.07064,"close_rate":0.07099408521303258,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":260,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.063576,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.063576,"stop_loss_ratio":0.1,"min_rate":0.07064,"max_rate":0.07099408521303258,"is_open":false,"open_timestamp":1516650600000.0,"close_timestamp":1516666200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.06080938507725528,"open_date":"2018-01-22 21:25:00+00:00","close_date":"2018-01-22 22:05:00+00:00","open_rate":0.01644483,"close_rate":0.01652726022556391,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014800347,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014800347,"stop_loss_ratio":0.1,"min_rate":0.01644483,"max_rate":0.01652726022556391,"is_open":false,"open_timestamp":1516656300000.0,"close_timestamp":1516658700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":23.08935580697299,"open_date":"2018-01-23 00:05:00+00:00","close_date":"2018-01-23 00:35:00+00:00","open_rate":4.331e-05,"close_rate":4.3961278195488714e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":3.8979e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":3.8979e-05,"stop_loss_ratio":0.1,"min_rate":4.331e-05,"max_rate":4.3961278195488714e-05,"is_open":false,"open_timestamp":1516665900000.0,"close_timestamp":1516667700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":31.250000000000004,"open_date":"2018-01-23 01:50:00+00:00","close_date":"2018-01-23 
02:15:00+00:00","open_rate":3.2e-05,"close_rate":3.2802005012531326e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":25,"profit_ratio":0.01995012,"profit_abs":2.5062656641604113e-05,"exit_reason":"roi","initial_stop_loss_abs":2.88e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.88e-05,"stop_loss_ratio":0.1,"min_rate":3.2e-05,"max_rate":3.2802005012531326e-05,"is_open":false,"open_timestamp":1516672200000.0,"close_timestamp":1516673700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010907854156754156,"open_date":"2018-01-23 04:25:00+00:00","close_date":"2018-01-23 05:15:00+00:00","open_rate":0.09167706,"close_rate":0.09213659413533835,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":50,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.08250935400000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.08250935400000001,"stop_loss_ratio":0.1,"min_rate":0.09167706,"max_rate":0.09213659413533835,"is_open":false,"open_timestamp":1516681500000.0,"close_timestamp":1516684500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014440474918339117,"open_date":"2018-01-23 07:35:00+00:00","close_date":"2018-01-23 09:00:00+00:00","open_rate":0.0692498,"close_rate":0.06959691679197995,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06232482,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06232482,"stop_loss_ratio":0.1,"min_rate":0.0692498,"max_rate":0.06959691679197995,"is_open":false,"open_timestamp":1516692900000.0,"close_timestamp":1516698000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":31.426775612822127,"open_date":"2018-01-23 10:50:00+00:00","close_date":"2018-01-23 13:05:00+00:00","open_rate":3.182e-05,"close_rate":3.197949874686716e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":135,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.8638e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8638e-05,"stop_loss_ratio":0.1,"min_rate":3.182e-05,"max_rate":3.197949874686716e-05,"is_open":false,"open_timestamp":1516704600000.0,"close_timestamp":1516712700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.024461839530332683,"open_date":"2018-01-23 11:05:00+00:00","close_date":"2018-01-23 16:05:00+00:00","open_rate":0.04088,"close_rate":0.04108491228070175,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":300,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036792,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036792,"stop_loss_ratio":0.1,"min_rate":0.04088,"max_rate":0.04108491228070175,"is_open":false,"open_timestamp":1516705500000.0,"close_timestamp":1516723500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.417475728155345,"open_date":"2018-01-23 14:55:00+00:00","close_date":"2018-01-23 
15:35:00+00:00","open_rate":5.15e-05,"close_rate":5.175814536340851e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.635e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.635e-05,"stop_loss_ratio":0.1,"min_rate":5.15e-05,"max_rate":5.175814536340851e-05,"is_open":false,"open_timestamp":1516719300000.0,"close_timestamp":1516721700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.011023294646713328,"open_date":"2018-01-23 16:35:00+00:00","close_date":"2018-01-24 00:05:00+00:00","open_rate":0.09071698,"close_rate":0.09117170170426064,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":450,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.081645282,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.081645282,"stop_loss_ratio":0.1,"min_rate":0.09071698,"max_rate":0.09117170170426064,"is_open":false,"open_timestamp":1516725300000.0,"close_timestamp":1516752300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":31.969309462915604,"open_date":"2018-01-23 17:25:00+00:00","close_date":"2018-01-23 18:45:00+00:00","open_rate":3.128e-05,"close_rate":3.1436791979949865e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":80,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":2.8152e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.8152e-05,"stop_loss_ratio":0.1,"min_rate":3.128e-05,"max_rate":3.1436791979949865e-05,"is_open":false,"open_timestamp":1516728300000.0,"close_timestamp":1516733100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":10.465724751439035,"open_date":"2018-01-23 20:15:00+00:00","close_date":"2018-01-23 22:00:00+00:00","open_rate":9.555e-05,"close_rate":9.602894736842104e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":8.5995e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.5995e-05,"stop_loss_ratio":0.1,"min_rate":9.555e-05,"max_rate":9.602894736842104e-05,"is_open":false,"open_timestamp":1516738500000.0,"close_timestamp":1516744800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.02450979791426522,"open_date":"2018-01-23 22:30:00+00:00","close_date":"2018-01-23 23:10:00+00:00","open_rate":0.04080001,"close_rate":0.0410045213283208,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036720009,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036720009,"stop_loss_ratio":0.1,"min_rate":0.04080001,"max_rate":0.0410045213283208,"is_open":false,"open_timestamp":1516746600000.0,"close_timestamp":1516749000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.36858415649816,"open_date":"2018-01-23 23:50:00+00:00","close_date":"2018-01-24 
03:35:00+00:00","open_rate":5.163e-05,"close_rate":5.18887969924812e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":225,"profit_ratio":-0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6467e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6467e-05,"stop_loss_ratio":0.1,"min_rate":5.163e-05,"max_rate":5.18887969924812e-05,"is_open":false,"open_timestamp":1516751400000.0,"close_timestamp":1516764900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.024747691102289384,"open_date":"2018-01-24 00:20:00+00:00","close_date":"2018-01-24 01:50:00+00:00","open_rate":0.04040781,"close_rate":0.04061035541353383,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":90,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036367029,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036367029,"stop_loss_ratio":0.1,"min_rate":0.04040781,"max_rate":0.04061035541353383,"is_open":false,"open_timestamp":1516753200000.0,"close_timestamp":1516758600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.485580670303975,"open_date":"2018-01-24 06:45:00+00:00","close_date":"2018-01-24 07:25:00+00:00","open_rate":5.132e-05,"close_rate":5.157724310776942e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6188000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6188000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.132e-05,"max_rate":5.157724310776942e-05,"is_open":false,"open_timestamp":1516776300000.0,"close_timestamp":1516778700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.23816852635629,"open_date":"2018-01-24 14:15:00+00:00","close_date":"2018-01-24 14:25:00+00:00","open_rate":5.198e-05,"close_rate":5.432496240601503e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":10,"profit_ratio":0.03990025,"profit_abs":4.5112781954887056e-05,"exit_reason":"roi","initial_stop_loss_abs":4.6782e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6782e-05,"stop_loss_ratio":0.1,"min_rate":5.198e-05,"max_rate":5.432496240601503e-05,"is_open":false,"open_timestamp":1516803300000.0,"close_timestamp":1516803900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":32.74394237066143,"open_date":"2018-01-24 14:50:00+00:00","close_date":"2018-01-24 16:35:00+00:00","open_rate":3.054e-05,"close_rate":3.069308270676692e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":105,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.7486000000000004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.7486000000000004e-05,"stop_loss_ratio":0.1,"min_rate":3.054e-05,"max_rate":3.069308270676692e-05,"is_open":false,"open_timestamp":1516805400000.0,"close_timestamp":1516811700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":10.795638562020944,"open_date":"2018-01-24 15:10:00+00:00","close_date":"2018-01-24 
16:15:00+00:00","open_rate":9.263e-05,"close_rate":9.309431077694236e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":65,"profit_ratio":0.0,"profit_abs":5.012531328320953e-06,"exit_reason":"roi","initial_stop_loss_abs":8.3367e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.3367e-05,"stop_loss_ratio":0.1,"min_rate":9.263e-05,"max_rate":9.309431077694236e-05,"is_open":false,"open_timestamp":1516806600000.0,"close_timestamp":1516810500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.13565469713457,"open_date":"2018-01-24 22:40:00+00:00","close_date":"2018-01-24 23:25:00+00:00","open_rate":5.514e-05,"close_rate":5.54163909774436e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.962599999999999e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.962599999999999e-05,"stop_loss_ratio":0.1,"min_rate":5.514e-05,"max_rate":5.54163909774436e-05,"is_open":false,"open_timestamp":1516833600000.0,"close_timestamp":1516836300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":20.3210729526519,"open_date":"2018-01-25 00:50:00+00:00","close_date":"2018-01-25 01:30:00+00:00","open_rate":4.921e-05,"close_rate":4.9456666666666664e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.4289e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.4289e-05,"stop_loss_ratio":0.1,"min_rate":4.921e-05,"max_rate":4.9456666666666664e-05,"is_open":false,"open_timestamp":1516841400000.0,"close_timestamp":1516843800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.38461538461538464,"open_date":"2018-01-25 08:15:00+00:00","close_date":"2018-01-25 12:15:00+00:00","open_rate":0.0026,"close_rate":0.002613032581453634,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":240,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.00234,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.00234,"stop_loss_ratio":0.1,"min_rate":0.0026,"max_rate":0.002613032581453634,"is_open":false,"open_timestamp":1516868100000.0,"close_timestamp":1516882500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03571593119825878,"open_date":"2018-01-25 10:25:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":0.02799871,"close_rate":0.028139054411027563,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":350,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025198839,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025198839,"stop_loss_ratio":0.1,"min_rate":0.02799871,"max_rate":0.028139054411027563,"is_open":false,"open_timestamp":1516875900000.0,"close_timestamp":1516896900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.024516401717913305,"open_date":"2018-01-25 11:00:00+00:00","close_date":"2018-01-25 
11:45:00+00:00","open_rate":0.04078902,"close_rate":0.0409934762406015,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.036710118,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.036710118,"stop_loss_ratio":0.1,"min_rate":0.04078902,"max_rate":0.0409934762406015,"is_open":false,"open_timestamp":1516878000000.0,"close_timestamp":1516880700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"NXT/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":34.602076124567475,"open_date":"2018-01-25 13:05:00+00:00","close_date":"2018-01-25 13:45:00+00:00","open_rate":2.89e-05,"close_rate":2.904486215538847e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":2.601e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":2.601e-05,"stop_loss_ratio":0.1,"min_rate":2.89e-05,"max_rate":2.904486215538847e-05,"is_open":false,"open_timestamp":1516885500000.0,"close_timestamp":1516887900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.02432912439481303,"open_date":"2018-01-25 13:20:00+00:00","close_date":"2018-01-25 14:05:00+00:00","open_rate":0.041103,"close_rate":0.04130903007518797,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0369927,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0369927,"stop_loss_ratio":0.1,"min_rate":0.041103,"max_rate":0.04130903007518797,"is_open":false,"open_timestamp":1516886400000.0,"close_timestamp":1516889100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.42299189388357,"open_date":"2018-01-25 15:45:00+00:00","close_date":"2018-01-25 16:15:00+00:00","open_rate":5.428e-05,"close_rate":5.509624060150376e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":4.8852000000000006e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8852000000000006e-05,"stop_loss_ratio":0.1,"min_rate":5.428e-05,"max_rate":5.509624060150376e-05,"is_open":false,"open_timestamp":1516895100000.0,"close_timestamp":1516896900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.47063169560399,"open_date":"2018-01-25 17:45:00+00:00","close_date":"2018-01-25 23:15:00+00:00","open_rate":5.414e-05,"close_rate":5.441137844611528e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":330,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.8726e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.8726e-05,"stop_loss_ratio":0.1,"min_rate":5.414e-05,"max_rate":5.441137844611528e-05,"is_open":false,"open_timestamp":1516902300000.0,"close_timestamp":1516922100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.02415005686130888,"open_date":"2018-01-25 21:15:00+00:00","close_date":"2018-01-25 
21:55:00+00:00","open_rate":0.04140777,"close_rate":0.0416153277443609,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.037266993000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.037266993000000005,"stop_loss_ratio":0.1,"min_rate":0.04140777,"max_rate":0.0416153277443609,"is_open":false,"open_timestamp":1516914900000.0,"close_timestamp":1516917300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.3932224183965176,"open_date":"2018-01-26 02:05:00+00:00","close_date":"2018-01-26 02:45:00+00:00","open_rate":0.00254309,"close_rate":0.002555837318295739,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002288781,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002288781,"stop_loss_ratio":0.1,"min_rate":0.00254309,"max_rate":0.002555837318295739,"is_open":false,"open_timestamp":1516932300000.0,"close_timestamp":1516934700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":17.834849295523455,"open_date":"2018-01-26 02:55:00+00:00","close_date":"2018-01-26 15:10:00+00:00","open_rate":5.607e-05,"close_rate":5.6351052631578935e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":735,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0463e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0463e-05,"stop_loss_ratio":0.1,"min_rate":5.607e-05,"max_rate":5.6351052631578935e-05,"is_open":false,"open_timestamp":1516935300000.0,"close_timestamp":1516979400000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.39400171784748983,"open_date":"2018-01-26 06:10:00+00:00","close_date":"2018-01-26 09:25:00+00:00","open_rate":0.00253806,"close_rate":0.0025507821052631577,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":195,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002284254,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002284254,"stop_loss_ratio":0.1,"min_rate":0.00253806,"max_rate":0.0025507821052631577,"is_open":false,"open_timestamp":1516947000000.0,"close_timestamp":1516958700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.024096385542168672,"open_date":"2018-01-26 07:25:00+00:00","close_date":"2018-01-26 09:55:00+00:00","open_rate":0.0415,"close_rate":0.04170802005012531,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":150,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.03735,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03735,"stop_loss_ratio":0.1,"min_rate":0.0415,"max_rate":0.04170802005012531,"is_open":false,"open_timestamp":1516951500000.0,"close_timestamp":1516960500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":18.793459875963165,"open_date":"2018-01-26 09:55:00+00:00","close_date":"2018-01-26 
10:25:00+00:00","open_rate":5.321e-05,"close_rate":5.401015037593984e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":4.7889e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.7889e-05,"stop_loss_ratio":0.1,"min_rate":5.321e-05,"max_rate":5.401015037593984e-05,"is_open":false,"open_timestamp":1516960500000.0,"close_timestamp":1516962300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.036074437437185386,"open_date":"2018-01-26 16:05:00+00:00","close_date":"2018-01-26 16:45:00+00:00","open_rate":0.02772046,"close_rate":0.02785940967418546,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.024948414,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.024948414,"stop_loss_ratio":0.1,"min_rate":0.02772046,"max_rate":0.02785940967418546,"is_open":false,"open_timestamp":1516982700000.0,"close_timestamp":1516985100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010569326272036914,"open_date":"2018-01-26 23:35:00+00:00","close_date":"2018-01-27 00:15:00+00:00","open_rate":0.09461341,"close_rate":0.09508766268170424,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.085152069,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085152069,"stop_loss_ratio":0.1,"min_rate":0.09461341,"max_rate":0.09508766268170424,"is_open":false,"open_timestamp":1517009700000.0,"close_timestamp":1517012100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":17.809439002671414,"open_date":"2018-01-27 00:35:00+00:00","close_date":"2018-01-27 01:30:00+00:00","open_rate":5.615e-05,"close_rate":5.643145363408521e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":55,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":5.0535e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0535e-05,"stop_loss_ratio":0.1,"min_rate":5.615e-05,"max_rate":5.643145363408521e-05,"is_open":false,"open_timestamp":1517013300000.0,"close_timestamp":1517016600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ADA/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":17.998560115190784,"open_date":"2018-01-27 00:45:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":5.556e-05,"close_rate":5.144e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4560,"profit_ratio":-0.07877175,"profit_abs":-7.415406767458598e-05,"exit_reason":"force_exit","initial_stop_loss_abs":5.0004e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":5.0004e-05,"stop_loss_ratio":0.1,"min_rate":5.144e-05,"max_rate":5.556e-05,"is_open":false,"open_timestamp":1517013900000.0,"close_timestamp":1517287500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014492751522789634,"open_date":"2018-01-27 02:30:00+00:00","close_date":"2018-01-27 
11:25:00+00:00","open_rate":0.06900001,"close_rate":0.06934587471177944,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":535,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.062100009000000005,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.062100009000000005,"stop_loss_ratio":0.1,"min_rate":0.06900001,"max_rate":0.06934587471177944,"is_open":false,"open_timestamp":1517020200000.0,"close_timestamp":1517052300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010582027378879436,"open_date":"2018-01-27 06:25:00+00:00","close_date":"2018-01-27 07:05:00+00:00","open_rate":0.09449985,"close_rate":0.0949735334586466,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.085049865,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.085049865,"stop_loss_ratio":0.1,"min_rate":0.09449985,"max_rate":0.0949735334586466,"is_open":false,"open_timestamp":1517034300000.0,"close_timestamp":1517036700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ZEC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.02434885085598385,"open_date":"2018-01-27 09:40:00+00:00","close_date":"2018-01-30 04:40:00+00:00","open_rate":0.0410697,"close_rate":0.03928809,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":4020,"profit_ratio":-0.04815133,"profit_abs":-4.338015617352949e-05,"exit_reason":"force_exit","initial_stop_loss_abs":0.03696273,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.03696273,"stop_loss_ratio":0.1,"min_rate":0.03928809,"max_rate":0.0410697,"is_open":false,"open_timestamp":1517046000000.0,"close_timestamp":1517287200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.03508771929824561,"open_date":"2018-01-27 11:45:00+00:00","close_date":"2018-01-27 12:30:00+00:00","open_rate":0.0285,"close_rate":0.02864285714285714,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025650000000000003,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025650000000000003,"stop_loss_ratio":0.1,"min_rate":0.0285,"max_rate":0.02864285714285714,"is_open":false,"open_timestamp":1517053500000.0,"close_timestamp":1517056200000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XMR/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.034887307020861215,"open_date":"2018-01-27 12:35:00+00:00","close_date":"2018-01-27 15:25:00+00:00","open_rate":0.02866372,"close_rate":0.02880739779448621,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":170,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.025797348,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.025797348,"stop_loss_ratio":0.1,"min_rate":0.02866372,"max_rate":0.02880739779448621,"is_open":false,"open_timestamp":1517056500000.0,"close_timestamp":1517066700000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.010484268355332824,"open_date":"2018-01-27 15:50:00+00:00","close_date":"2018-01-27 
16:50:00+00:00","open_rate":0.095381,"close_rate":0.09585910025062656,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.0858429,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.0858429,"stop_loss_ratio":0.1,"min_rate":0.095381,"max_rate":0.09585910025062656,"is_open":false,"open_timestamp":1517068200000.0,"close_timestamp":1517071800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014794886650455415,"open_date":"2018-01-27 17:05:00+00:00","close_date":"2018-01-27 17:45:00+00:00","open_rate":0.06759092,"close_rate":0.06792972160401002,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060831828,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060831828,"stop_loss_ratio":0.1,"min_rate":0.06759092,"max_rate":0.06792972160401002,"is_open":false,"open_timestamp":1517072700000.0,"close_timestamp":1517075100000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.38684569885609726,"open_date":"2018-01-27 23:40:00+00:00","close_date":"2018-01-28 01:05:00+00:00","open_rate":0.00258501,"close_rate":0.002597967443609022,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":85,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.002326509,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002326509,"stop_loss_ratio":0.1,"min_rate":0.00258501,"max_rate":0.002597967443609022,"is_open":false,"open_timestamp":1517096400000.0,"close_timestamp":1517101500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014928710926711672,"open_date":"2018-01-28 02:25:00+00:00","close_date":"2018-01-28 08:10:00+00:00","open_rate":0.06698502,"close_rate":0.0673207845112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":345,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060286518,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060286518,"stop_loss_ratio":0.1,"min_rate":0.06698502,"max_rate":0.0673207845112782,"is_open":false,"open_timestamp":1517106300000.0,"close_timestamp":1517127000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014767187899175548,"open_date":"2018-01-28 10:25:00+00:00","close_date":"2018-01-28 16:30:00+00:00","open_rate":0.0677177,"close_rate":0.06805713709273183,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":365,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06094593000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06094593000000001,"stop_loss_ratio":0.1,"min_rate":0.0677177,"max_rate":0.06805713709273183,"is_open":false,"open_timestamp":1517135100000.0,"close_timestamp":1517157000000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"XLM/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":19.175455417066157,"open_date":"2018-01-28 20:35:00+00:00","close_date":"2018-01-28 
21:35:00+00:00","open_rate":5.215e-05,"close_rate":5.2411403508771925e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":60,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":4.6935e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":4.6935e-05,"stop_loss_ratio":0.1,"min_rate":5.215e-05,"max_rate":5.2411403508771925e-05,"is_open":false,"open_timestamp":1517171700000.0,"close_timestamp":1517175300000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.36521808998243305,"open_date":"2018-01-28 22:00:00+00:00","close_date":"2018-01-28 22:30:00+00:00","open_rate":0.00273809,"close_rate":0.002779264285714285,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962207e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002464281,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002464281,"stop_loss_ratio":0.1,"min_rate":0.00273809,"max_rate":0.002779264285714285,"is_open":false,"open_timestamp":1517176800000.0,"close_timestamp":1517178600000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"ETC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.3641236272539253,"open_date":"2018-01-29 00:00:00+00:00","close_date":"2018-01-29 00:30:00+00:00","open_rate":0.00274632,"close_rate":0.002787618045112782,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":30,"profit_ratio":0.00997506,"profit_abs":1.5037593984962424e-05,"exit_reason":"roi","initial_stop_loss_abs":0.002471688,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.002471688,"stop_loss_ratio":0.1,"min_rate":0.00274632,"max_rate":0.002787618045112782,"is_open":false,"open_timestamp":1517184000000.0,"close_timestamp":1517185800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"LTC/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.061634117689115045,"open_date":"2018-01-29 02:15:00+00:00","close_date":"2018-01-29 03:00:00+00:00","open_rate":0.01622478,"close_rate":0.016306107218045113,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":45,"profit_ratio":0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.014602302,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.014602302,"stop_loss_ratio":0.1,"min_rate":0.01622478,"max_rate":0.016306107218045113,"is_open":false,"open_timestamp":1517192100000.0,"close_timestamp":1517194800000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014492753623188404,"open_date":"2018-01-29 03:05:00+00:00","close_date":"2018-01-29 03:45:00+00:00","open_rate":0.069,"close_rate":0.06934586466165413,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320519e-06,"exit_reason":"roi","initial_stop_loss_abs":0.06210000000000001,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.06210000000000001,"stop_loss_ratio":0.1,"min_rate":0.069,"max_rate":0.06934586466165413,"is_open":false,"open_timestamp":1517195100000.0,"close_timestamp":1517197500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":11.42204454597373,"open_date":"2018-01-29 05:20:00+00:00","close_date":"2018-01-29 
06:55:00+00:00","open_rate":8.755e-05,"close_rate":8.798884711779448e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":95,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":7.879500000000001e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":7.879500000000001e-05,"stop_loss_ratio":0.1,"min_rate":8.755e-05,"max_rate":8.798884711779448e-05,"is_open":false,"open_timestamp":1517203200000.0,"close_timestamp":1517208900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014650376815016871,"open_date":"2018-01-29 07:00:00+00:00","close_date":"2018-01-29 19:25:00+00:00","open_rate":0.06825763,"close_rate":0.06859977350877192,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":745,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.061431867,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.061431867,"stop_loss_ratio":0.1,"min_rate":0.06825763,"max_rate":0.06859977350877192,"is_open":false,"open_timestamp":1517209200000.0,"close_timestamp":1517253900000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"DASH/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":0.014894490408841846,"open_date":"2018-01-29 19:45:00+00:00","close_date":"2018-01-29 20:25:00+00:00","open_rate":0.06713892,"close_rate":0.06747545593984962,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":40,"profit_ratio":-0.0,"profit_abs":5.012531328320736e-06,"exit_reason":"roi","initial_stop_loss_abs":0.060425028000000006,"initial_stop_loss_ratio":0.1,"stop_loss_abs":0.060425028000000006,"stop_loss_ratio":0.1,"min_rate":0.06713892,"max_rate":0.06747545593984962,"is_open":false,"open_timestamp":1517255100000.0,"close_timestamp":1517257500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null},{"pair":"TRX/BTC","stake_amount":0.001,"max_stake_amount":0.001,"amount":11.193194537721066,"open_date":"2018-01-29 23:30:00+00:00","close_date":"2018-01-30 
04:45:00+00:00","open_rate":8.934e-05,"close_rate":8.8e-05,"fee_open":0.0025,"fee_close":0.0025,"trade_duration":315,"profit_ratio":-0.0199116,"profit_abs":-1.4998880680546292e-05,"exit_reason":"force_exit","initial_stop_loss_abs":8.0406e-05,"initial_stop_loss_ratio":0.1,"stop_loss_abs":8.0406e-05,"stop_loss_ratio":0.1,"min_rate":8.8e-05,"max_rate":8.934e-05,"is_open":false,"open_timestamp":1517268600000.0,"close_timestamp":1517287500000.0,"is_short":false,"leverage":1.0,"enter_tag":null,"orders":null}],"locks":[],"best_pair":{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.00010025062656641558,"profit_total":0.010025062656641558,"profit_total_pct":1.0,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},"worst_pair":{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":3.533834586465928e-05,"profit_total":0.003533834586465928,"profit_total_pct":0.35,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},"results_per_pair":[{"key":"XLM/BTC","trades":21,"profit_mean":0.0026243899999999994,"profit_mean_pct":0.2624389999999999,"profit_sum":0.05511218999999999,"profit_sum_pct":5.51,"profit_total_abs":0.00016065162907268006,"profit_total":0.016065162907268005,"profit_total_pct":1.61,"duration_avg":"3:21:00","wins":20,"draws":0,"losses":1},{"key":"ETC/BTC","trades":20,"profit_mean":0.0022568569999999997,"profit_mean_pct":0.22568569999999996,"profit_sum":0.04513713999999999,"profit_sum_pct":4.51,"profit_total_abs":0.00014561403508771753,"profit_total":0.014561403508771753,"profit_total_pct":1.46,"duration_avg":"1:45:00","wins":19,"draws":0,"losses":1},{"key":"ETH/BTC","trades":21,"profit_mean":0.0009500057142857142,"profit_mean_pct":0.09500057142857142,"profit_sum":0.01995012,"profit_sum_pct":2.0,"profit_total_abs":0.00012531328320801774,"profit_total":0.012531328320801774,"profit_total_pct":1.25,"duration_avg":"2:17:00","wins":21,"draws":0,"losses":0},{"key":"ADA/BTC","trades":29,"profit_mean":-0.0011598141379310352,"profit_mean_pct":-0.11598141379310352,"profit_sum":-0.03363461000000002,"profit_sum_pct":-3.36,"profit_total_abs":0.00011156021803969656,"profit_total":0.011156021803969657,"profit_total_pct":1.12,"duration_avg":"5:35:00","wins":27,"draws":0,"losses":2},{"key":"TRX/BTC","trades":15,"profit_mean":0.0023467073333333323,"profit_mean_pct":0.23467073333333321,"profit_sum":0.035200609999999986,"profit_sum_pct":3.52,"profit_total_abs":0.00011056502909388873,"profit_total":0.011056502909388873,"profit_total_pct":1.11,"duration_avg":"2:28:00","wins":13,"draws":0,"losses":2},{"key":"DASH/BTC","trades":16,"profit_mean":0.0018703237499999997,"profit_mean_pct":0.18703237499999997,"profit_sum":0.029925179999999996,"profit_sum_pct":2.99,"profit_total_abs":0.0001102756892230564,"profit_total":0.01102756892230564,"profit_total_pct":1.1,"duration_avg":"3:03:00","wins":16,"draws":0,"losses":0},{"key":"LTC/BTC","trades":8,"profit_mean":0.00748129625,"profit_mean_pct":0.748129625,"profit_sum":0.05985037,"profit_sum_pct":5.99,"profit_total_abs":0.00010025062656641558,"profit_total":0.010025062656641558,"profit_total_pct":1.0,"duration_avg":"1:59:00","wins":8,"draws":0,"losses":0},{"key":"ZEC/BTC","trades":21,"profit_mean":-0.00039290904761904774,"profit_mean_pct":-0.03929090476190478,"profit_sum":-0.008251090000000003,"profit_sum_pct":-0.83,"profit_total_abs":9.6970721019451
11e-05,"profit_total":0.009697072101945111,"profit_total_pct":0.97,"duration_avg":"4:17:00","wins":20,"draws":0,"losses":1},{"key":"NXT/BTC","trades":12,"profit_mean":-0.0012261025000000006,"profit_mean_pct":-0.12261025000000006,"profit_sum":-0.014713230000000008,"profit_sum_pct":-1.47,"profit_total_abs":4.536340852130151e-05,"profit_total":0.004536340852130151,"profit_total_pct":0.45,"duration_avg":"0:57:00","wins":11,"draws":0,"losses":1},{"key":"XMR/BTC","trades":16,"profit_mean":-0.0027899012500000007,"profit_mean_pct":-0.2789901250000001,"profit_sum":-0.04463842000000001,"profit_sum_pct":-4.46,"profit_total_abs":3.533834586465928e-05,"profit_total":0.003533834586465928,"profit_total_pct":0.35,"duration_avg":"8:41:00","wins":15,"draws":0,"losses":1},{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"results_per_enter_tag":[{"key":"buy_tag","trades":1,"profit_mean":0.03990025,"profit_mean_pct":3.9900249999999997,"profit_sum":0.03990025,"profit_sum_pct":3.99,"profit_total_abs":4.5112781954887056e-05,"profit_total":0.004511278195488706,"profit_total_pct":0.45,"duration_avg":"0:15:00","wins":1,"draws":0,"losses":0},{"key":"TOTAL","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9}],"exit_reason_summary":[{"exit_reason":"roi","trades":170,"wins":170,"draws":0,"losses":0,"profit_mean":0.005398268352941177,"profit_mean_pct":0.54,"profit_sum":0.91770562,"profit_sum_pct":91.77,"profit_total_abs":0.0017744360902255465,"profit_total":0.30590187333333335,"profit_total_pct":30.59},{"exit_reason":"stop_loss","trades":6,"wins":0,"draws":0,"losses":6,"profit_mean":-0.10448878000000002,"profit_mean_pct":-10.45,"profit_sum":-0.6269326800000001,"profit_sum_pct":-62.69,"profit_total_abs":-0.0006000000000000003,"profit_total":-0.20897756000000003,"profit_total_pct":-20.9},{"exit_reason":"force_exit","trades":3,"wins":0,"draws":0,"losses":3,"profit_mean":-0.04894489333333333,"profit_mean_pct":-4.89,"profit_sum":-0.14683468,"profit_sum_pct":-14.68,"profit_total_abs":-0.00013253310452866177,"profit_total":-0.04894489333333333,"profit_total_pct":-4.89}],"left_open_trades":[{"key":"TRX/BTC","trades":1,"profit_mean":-0.0199116,"profit_mean_pct":-1.9911600000000003,"profit_sum":-0.0199116,"profit_sum_pct":-1.99,"profit_total_abs":-1.4998880680546292e-05,"profit_total":-0.0014998880680546292,"profit_total_pct":-0.15,"duration_avg":"5:15:00","wins":0,"draws":0,"losses":1},{"key":"ZEC/BTC","trades":1,"profit_mean":-0.04815133,"profit_mean_pct":-4.815133,"profit_sum":-0.04815133,"profit_sum_pct":-4.82,"profit_total_abs":-4.338015617352949e-05,"profit_total":-0.004338015617352949,"profit_total_pct":-0.43,"duration_avg":"2 days, 19:00:00","wins":0,"draws":0,"losses":1},{"key":"ADA/BTC","trades":1,"profit_mean":-0.07877175,"profit_mean_pct":-7.877175,"profit_sum":-0.07877175,"profit_sum_pct":-7.88,"profit_total_abs":-7.415406767458598e-05,"profit_total":-0.007415406767458598,"profit_total_pct":-0.74,"duration_avg":"3 days, 
4:00:00","wins":0,"draws":0,"losses":1},{"key":"TOTAL","trades":3,"profit_mean":-0.04894489333333333,"profit_mean_pct":-4.894489333333333,"profit_sum":-0.14683468,"profit_sum_pct":-14.68,"profit_total_abs":-0.00013253310452866177,"profit_total":-0.013253310452866176,"profit_total_pct":-1.33,"duration_avg":"2 days, 1:25:00","wins":0,"draws":0,"losses":3}],"total_trades":179,"trade_count_long":179,"trade_count_short":0,"total_volume":0.17900000000000005,"avg_stake_amount":0.0010000000000000002,"profit_mean":0.0008041243575418989,"profit_median":0.0,"profit_total":0.10419029856968845,"profit_total_long":0.10419029856968845,"profit_total_short":0.0,"profit_total_abs":0.0010419029856968845,"profit_total_long_abs":0.0010419029856968845,"profit_total_short_abs":0.0,"cagr":5.712688499973264,"profit_factor":2.4223288739520954,"backtest_start":"2018-01-10 07:15:00","backtest_start_ts":1515568500000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":"2020-10-01 18:00:00+00:00","backtest_run_end_ts":"2020-10-01 18:01:00+00:00","trades_per_day":9.42,"market_change":1.22,"pairlist":["TRX/BTC","ADA/BTC","XLM/BTC","ETH/BTC","XMR/BTC","ZEC/BTC","NXT/BTC","LTC/BTC","ETC/BTC","DASH/BTC"],"stake_amount":0.001,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":0.01,"dry_run_wallet":0.01,"final_balance":0.011041902985696884,"rejected_signals":0,"timedout_entry_orders":0,"timedout_exit_orders":0,"canceled_trade_entries":0,"canceled_entry_orders":0,"replaced_entry_orders":0,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timeframe_detail":"","timerange":"","enable_protections":false,"strategy_name":"StrategyTestV3","stoploss":0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":false,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.17955111999999998,"backtest_worst_day":-0.14683468,"backtest_best_day_abs":0.000245614,"backtest_worst_day_abs":-0.0001325331,"winning_days":19,"draw_days":0,"losing_days":2,"daily_profit":[["2018-01-10",0.000245614],["2018-01-11",0.0001055138],["2018-01-12",4.51128e-05],["2018-01-13",3.00752e-05],["2018-01-14",3.50877e-05],["2018-01-15",6.51629e-05],["2018-01-16",5.11278e-05],["2018-01-17",7.01754e-05],["2018-01-18",8.5213e-05],["2018-01-19",3.00752e-05],["2018-01-20",2.50627e-05],["2018-01-21",4.01003e-05],["2018-01-22",7.01754e-05],["2018-01-23",8.5213e-05],["2018-01-24",8.02005e-05],["2018-01-25",-4.48622e-05],["2018-01-26",4.01003e-05],["2018-01-27",4.01003e-05],["2018-01-28",3.50877e-05],["2018-01-29",4.01003e-05],["2018-01-30",-0.0001325331]],"wins":48,"losses":9,"draws":122,"holding_avg":"3:40:00","holding_avg_s":13200.0,"winner_holding_avg":"0:24:00","winner_holding_avg_s":1440.0,"loser_holding_avg":"1 day, 5:57:00","loser_holding_avg_s":107820.0,"max_drawdown":0.21142322000000008,"max_drawdown_account":0.018740312808228732,"max_relative_drawdown":0.018740312808228732,"max_drawdown_abs":0.0002000000000000001,"drawdown_start":"2018-01-16 19:30:00","drawdown_start_ts":1516131000000.0,"drawdown_end":"2018-01-16 
22:25:00","drawdown_end_ts":1516141500000.0,"max_drawdown_low":0.0004721804511278108,"max_drawdown_high":0.0006721804511278109,"csum_min":0.010045112781954888,"csum_max":0.011069172932330812}},"strategy_comparison":[{"key":"StrategyTestV3","trades":179,"profit_mean":0.0008041243575418989,"profit_mean_pct":0.0804124357541899,"profit_sum":0.1439382599999999,"profit_sum_pct":14.39,"profit_total_abs":0.0010419029856968845,"profit_total":0.10419029856968845,"profit_total_pct":10.42,"duration_avg":"3:40:00","wins":170,"draws":0,"losses":9,"max_drawdown_account":0.018740312808228732,"max_drawdown_abs":"0.0002"}]} From 4d112def172cb9a34520f2876b825c486d7ccbef Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 28 Dec 2022 07:10:11 +0100 Subject: [PATCH 418/421] Remove binance AD from docs page fixes #7921 --- docs/overrides/main.html | 24 ------------------------ docs/stylesheets/ft.extra.css | 15 --------------- 2 files changed, 39 deletions(-) diff --git a/docs/overrides/main.html b/docs/overrides/main.html index dfc5264be..cba627ead 100644 --- a/docs/overrides/main.html +++ b/docs/overrides/main.html @@ -11,9 +11,6 @@ {% endif %}