From 05ed1b544f2853ae0054cd22bd15e623abbb3aa9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 8 Aug 2022 15:41:16 +0200 Subject: [PATCH 001/232] Working base for reinforcement learning model --- docs/freqai.md | 4 +- freqtrade/constants.py | 5 +- freqtrade/freqai/data_drawer.py | 16 +- .../ReinforcementLearningExample.py | 147 +++++++++++ freqtrade/freqai/freqai_interface.py | 2 +- .../RL/RLPrediction_agent.py | 162 ++++++++++++ .../prediction_models/RL/RLPrediction_env.py | 230 ++++++++++++++++++ .../freqai/prediction_models/RL/config.py | 37 +++ .../ReinforcementLearningModel.py | 157 ++++++++++++ 9 files changed, 748 insertions(+), 12 deletions(-) create mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample.py create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env.py create mode 100644 freqtrade/freqai/prediction_models/RL/config.py create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningModel.py diff --git a/docs/freqai.md b/docs/freqai.md index bba6faaea..032046882 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -123,8 +123,8 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. | | **Extraneous parameters** -| `keras` | If your model makes use of Keras (typical for Tensorflow-based prediction models), activate this flag so that the model save/loading follows Keras standards.
**Datatype:** Boolean. Default: `False`. -| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer. Default: 2.
+| `keras` | If your model makes use of Keras (typical for Tensorflow-based prediction models), activate this flag so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. Default: `False`.
+| `conv_width` | The width of a convolutional neural network input tensor or the `ReinforcementLearningModel` `window_size`. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. Default: 2. <br>
**Datatype:** integer. ### Important dataframe key patterns diff --git a/freqtrade/constants.py b/freqtrade/constants.py index ddbc84fa9..4d1891165 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -520,10 +520,7 @@ CONF_SCHEMA = { }, }, "model_training_parameters": { - "type": "object", - "properties": { - "n_estimators": {"type": "integer", "default": 1000} - }, + "type": "object" }, }, "required": [ diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index b3060deff..5282b4f59 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -390,10 +390,13 @@ class FreqaiDataDrawer: save_path = Path(dk.data_path) # Save the trained model - if not dk.keras: + model_type = self.freqai_info.get('model_save_type', 'joblib') + if model_type == 'joblib': dump(model, save_path / f"{dk.model_filename}_model.joblib") - else: + elif model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") + elif model_type == 'stable_baselines': + model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib") @@ -459,15 +462,18 @@ class FreqaiDataDrawer: dk.data_path / f"{dk.model_filename}_trained_df.pkl" ) + model_type = self.freqai_info.get('model_save_type', 'joblib') # try to access model in memory instead of loading object from disk to save time if dk.live and coin in self.model_dictionary: model = self.model_dictionary[coin] - elif not dk.keras: + elif model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") - else: + elif model_type == 'keras': from tensorflow import keras - model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") + elif model_type == 'stable_baselines': + from stable_baselines3.ppo.ppo import PPO + model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample.py new file mode 100644 index 000000000..1bafdbb80 --- /dev/null +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample.py @@ -0,0 +1,147 @@ +import logging +from functools import reduce + +import pandas as pd +import talib.abstract as ta +from pandas import DataFrame + +from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningExample(IStrategy): + """ + Test strategy - used for testing freqAI functionalities. + DO not use in production. 
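+
+    Adapted for reinforcement learning: the `&-action` target column is set to a
+    constant placeholder here and is filled with the RL model's predicted action
+    at prediction time.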
+ """ + + minimal_roi = {"0": 0.1, "240": -1} + + plot_config = { + "main_plot": {}, + "subplots": { + "prediction": {"prediction": {"color": "blue"}}, + "target_roi": { + "target_roi": {"color": "brown"}, + }, + "do_predict": { + "do_predict": {"color": "brown"}, + }, + }, + } + + process_only_new_candles = True + stoploss = -0.05 + use_exit_signal = True + startup_candle_count: int = 300 + can_short = False + + linear_roi_offset = DecimalParameter( + 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True + ) + max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) + + def informative_pairs(self): + whitelist_pairs = self.dp.current_whitelist() + corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] + informative_pairs = [] + for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: + for pair in whitelist_pairs: + informative_pairs.append((pair, tf)) + for pair in corr_pairs: + if pair in whitelist_pairs: + continue # avoid duplication + informative_pairs.append((pair, tf)) + return informative_pairs + + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + with self.freqai.lock: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] + + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). 
Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 + + return df + + def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + + self.freqai_info = self.config["freqai"] + + dataframe = self.freqai.start(dataframe, metadata, self) + + return dataframe + + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 4106f24e0..b6fde9357 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -657,7 +657,7 @@ class IFreqaiModel(ABC): """ @abstractmethod - def fit(self, data_dictionary: Dict[str, Any]) -> Any: + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: """ Most regressors use the same function names and arguments e.g. user can drop in LGBMRegressor in place of CatBoostRegressor and all data diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py new file mode 100644 index 000000000..acea025c0 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py @@ -0,0 +1,162 @@ +# common library + +import numpy as np +from stable_baselines3 import A2C +from stable_baselines3 import DDPG +from stable_baselines3 import PPO +from stable_baselines3 import SAC +from stable_baselines3 import TD3 +from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.noise import NormalActionNoise +from stable_baselines3.common.noise import OrnsteinUhlenbeckActionNoise +# from stable_baselines3.common.vec_env import DummyVecEnv + +from freqtrade.freqai.prediction_models.RL import config +# from meta.env_stock_trading.env_stock_trading import StockTradingEnv + +# RL models from stable-baselines + + +MODELS = {"a2c": A2C, "ddpg": DDPG, "td3": TD3, "sac": SAC, "ppo": PPO} + + +MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()} + + +NOISE = { + "normal": NormalActionNoise, + "ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise, +} + + +class TensorboardCallback(BaseCallback): + """ + Custom callback for plotting additional values in tensorboard. 
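+    It records the per-step training reward from `self.locals` under the
+    `train/reward` key on each `_on_step` call.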
+ """ + + def __init__(self, verbose=0): + super(TensorboardCallback, self).__init__(verbose) + + def _on_step(self) -> bool: + try: + self.logger.record(key="train/reward", value=self.locals["rewards"][0]) + except BaseException: + self.logger.record(key="train/reward", value=self.locals["reward"][0]) + return True + + +class RLPrediction_agent: + """Provides implementations for DRL algorithms + Based on: + https://github.com/AI4Finance-Foundation/FinRL-Meta/blob/master/agents/stablebaselines3_models.py + Attributes + ---------- + env: gym environment class + user-defined class + + Methods + ------- + get_model() + setup DRL algorithms + train_model() + train DRL algorithms in a train dataset + and output the trained model + DRL_prediction() + make a prediction in a test dataset and get results + """ + + def __init__(self, env): + self.env = env + + def get_model( + self, + model_name, + policy="MlpPolicy", + policy_kwargs=None, + model_kwargs=None, + verbose=1, + seed=None, + ): + if model_name not in MODELS: + raise NotImplementedError("NotImplementedError") + + if model_kwargs is None: + model_kwargs = MODEL_KWARGS[model_name] + + if "action_noise" in model_kwargs: + n_actions = self.env.action_space.shape[-1] + model_kwargs["action_noise"] = NOISE[model_kwargs["action_noise"]]( + mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions) + ) + print(model_kwargs) + model = MODELS[model_name]( + policy=policy, + env=self.env, + tensorboard_log=f"{config.TENSORBOARD_LOG_DIR}/{model_name}", + verbose=verbose, + policy_kwargs=policy_kwargs, + seed=seed, + **model_kwargs, + ) + return model + + def train_model(self, model, tb_log_name, total_timesteps=5000): + model = model.learn( + total_timesteps=total_timesteps, + tb_log_name=tb_log_name, + callback=TensorboardCallback(), + ) + return model + + @staticmethod + def DRL_prediction(model, environment): + test_env, test_obs = environment.get_sb_env() + """make a prediction""" + account_memory = [] + actions_memory = [] + test_env.reset() + for i in range(len(environment.df.index.unique())): + action, _states = model.predict(test_obs) + # account_memory = test_env.env_method(method_name="save_asset_memory") + # actions_memory = test_env.env_method(method_name="save_action_memory") + test_obs, rewards, dones, info = test_env.step(action) + if i == (len(environment.df.index.unique()) - 2): + account_memory = test_env.env_method(method_name="save_asset_memory") + actions_memory = test_env.env_method(method_name="save_action_memory") + if dones[0]: + print("hit end!") + break + return account_memory[0], actions_memory[0] + + @staticmethod + def DRL_prediction_load_from_file(model_name, environment, cwd): + if model_name not in MODELS: + raise NotImplementedError("NotImplementedError") + try: + # load agent + model = MODELS[model_name].load(cwd) + print("Successfully load model", cwd) + except BaseException: + raise ValueError("Fail to load agent!") + + # test on the testing env + state = environment.reset() + episode_returns = list() # the cumulative_return / initial_account + episode_total_assets = list() + episode_total_assets.append(environment.initial_total_asset) + done = False + while not done: + action = model.predict(state)[0] + state, reward, done, _ = environment.step(action) + + total_asset = ( + environment.cash + + (environment.price_array[environment.time] * environment.stocks).sum() + ) + episode_total_assets.append(total_asset) + episode_return = total_asset / environment.initial_total_asset + episode_returns.append(episode_return) 
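+            # episode_return is the cumulative return relative to the initial
+            # account value; note this FinRL-style helper expects an environment
+            # exposing `cash`, `stocks`, `price_array` and `initial_total_asset`.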
+ + print("episode_return", episode_return) + print("Test Finished!") + return episode_total_assets diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py new file mode 100644 index 000000000..5fef7fbed --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py @@ -0,0 +1,230 @@ +from enum import Enum + +import gym +import matplotlib.pyplot as plt +import numpy as np +from gym import spaces +from gym.utils import seeding + + +class Actions(Enum): + Hold = 0 + Buy = 1 + Sell = 2 + + +class Positions(Enum): + Short = 0 + Long = 1 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +class GymAnytrading(gym.Env): + """ + Based on https://github.com/AminHP/gym-anytrading + """ + + metadata = {'render.modes': ['human']} + + def __init__(self, signal_features, prices, window_size, fee=0.0): + assert signal_features.ndim == 2 + + self.seed() + self.signal_features = signal_features + self.prices = prices + self.window_size = window_size + self.fee = fee + self.shape = (window_size, self.signal_features.shape[1]) + + # spaces + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = None + self._position_history = None + self._total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + self._done = False + self._current_tick = self._start_tick + self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Short + self._position_history = (self.window_size * [None]) + [self._position] + self._total_reward = 0. + self._total_profit = 1. 
# unit + self._first_rendering = True + self.history = {} + return self._get_observation() + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + step_reward = self._calculate_reward(action) + self._total_reward += step_reward + + self._update_profit(action) + + trade = False + if ((action == Actions.Buy.value and self._position == Positions.Short) or + (action == Actions.Sell.value and self._position == Positions.Long)): + trade = True + + if trade: + self._position = self._position.opposite() + self._last_trade_tick = self._current_tick + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + total_reward=self._total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def render(self, mode='human'): + def _plot_position(position, tick): + color = None + if position == Positions.Short: + color = 'red' + elif position == Positions.Long: + color = 'green' + if color: + plt.scatter(tick, self.prices[tick], color=color) + + if self._first_rendering: + self._first_rendering = False + plt.cla() + plt.plot(self.prices) + start_position = self._position_history[self._start_tick] + _plot_position(start_position, self._start_tick) + + _plot_position(self._position, self._current_tick) + + plt.suptitle( + "Total Reward: %.6f" % self._total_reward + ' ~ ' + + "Total Profit: %.6f" % self._total_profit + ) + + plt.pause(0.01) + + def render_all(self, mode='human'): + window_ticks = np.arange(len(self._position_history)) + plt.plot(self.prices) + + short_ticks = [] + long_ticks = [] + for i, tick in enumerate(window_ticks): + if self._position_history[i] == Positions.Short: + short_ticks.append(tick) + elif self._position_history[i] == Positions.Long: + long_ticks.append(tick) + + plt.plot(short_ticks, self.prices[short_ticks], 'ro') + plt.plot(long_ticks, self.prices[long_ticks], 'go') + + plt.suptitle( + "Total Reward: %.6f" % self._total_reward + ' ~ ' + + "Total Profit: %.6f" % self._total_profit + ) + + def close(self): + plt.close() + + def save_rendering(self, filepath): + plt.savefig(filepath) + + def pause_rendering(self): + plt.show() + + def _calculate_reward(self, action): + step_reward = 0 + + trade = False + if ((action == Actions.Buy.value and self._position == Positions.Short) or + (action == Actions.Sell.value and self._position == Positions.Long)): + trade = True + + if trade: + current_price = self.prices[self._current_tick] + last_trade_price = self.prices[self._last_trade_tick] + price_diff = current_price - last_trade_price + + if self._position == Positions.Long: + step_reward += price_diff + + return step_reward + + def _update_profit(self, action): + trade = False + if ((action == Actions.Buy.value and self._position == Positions.Short) or + (action == Actions.Sell.value and self._position == Positions.Long)): + trade = True + + if trade or self._done: + current_price = self.prices[self._current_tick] + last_trade_price = self.prices[self._last_trade_tick] + + if self._position == Positions.Long: + shares = (self._total_profit * (1 - 
self.fee)) / last_trade_price + self._total_profit = (shares * (1 - self.fee)) * current_price + + def max_possible_profit(self): + current_tick = self._start_tick + last_trade_tick = current_tick - 1 + profit = 1. + + while current_tick <= self._end_tick: + position = None + if self.prices[current_tick] < self.prices[current_tick - 1]: + while (current_tick <= self._end_tick and + self.prices[current_tick] < self.prices[current_tick - 1]): + current_tick += 1 + position = Positions.Short + else: + while (current_tick <= self._end_tick and + self.prices[current_tick] >= self.prices[current_tick - 1]): + current_tick += 1 + position = Positions.Long + + if position == Positions.Long: + current_price = self.prices[current_tick - 1] + last_trade_price = self.prices[last_trade_tick] + shares = profit / last_trade_price + profit = shares * current_price + last_trade_tick = current_tick - 1 + print(profit) + + return profit diff --git a/freqtrade/freqai/prediction_models/RL/config.py b/freqtrade/freqai/prediction_models/RL/config.py new file mode 100644 index 000000000..c45eb2387 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/config.py @@ -0,0 +1,37 @@ +# dir +DATA_SAVE_DIR = "datasets" +TRAINED_MODEL_DIR = "trained_models" +TENSORBOARD_LOG_DIR = "tensorboard_log" +RESULTS_DIR = "results" + +# Model Parameters +A2C_PARAMS = {"n_steps": 5, "ent_coef": 0.01, "learning_rate": 0.0007} +PPO_PARAMS = { + "n_steps": 2048, + "ent_coef": 0.01, + "learning_rate": 0.00025, + "batch_size": 64, +} +DDPG_PARAMS = {"batch_size": 128, "buffer_size": 50000, "learning_rate": 0.001} +TD3_PARAMS = { + "batch_size": 100, + "buffer_size": 1000000, + "learning_rate": 0.001, +} +SAC_PARAMS = { + "batch_size": 64, + "buffer_size": 100000, + "learning_rate": 0.0001, + "learning_starts": 100, + "ent_coef": "auto_0.1", +} +ERL_PARAMS = { + "learning_rate": 3e-5, + "batch_size": 2048, + "gamma": 0.985, + "seed": 312, + "net_dimension": 512, + "target_step": 5000, + "eval_gap": 30, +} +RLlib_PARAMS = {"lr": 5e-5, "train_batch_size": 500, "gamma": 0.99} diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py b/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py new file mode 100644 index 000000000..dded1ac3b --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py @@ -0,0 +1,157 @@ +import logging +from typing import Any, Tuple, Dict +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from pandas import DataFrame +import pandas as pd +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import numpy as np +import numpy.typing as npt +from freqtrade.freqai.freqai_interface import IFreqaiModel + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
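+        :param dk: the FreqaiDataKitchen object for the current pair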
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + train_df = data_dictionary["train_features"] + + sep = '/' + coin = pair.split(sep, 1)[0] + price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + price.reset_index(inplace=True, drop=True) + + model_name = 'ppo' + + env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + + agent_params = self.freqai_info['model_training_parameters'] + total_timesteps = agent_params.get('total_timesteps', 1000) + + agent = RLPrediction_agent(env_instance) + + model = agent.get_model(model_name, model_kwargs=agent_params) + trained_model = agent.train_model(model=model, + tb_log_name=model_name, + total_timesteps=total_timesteps) + print('Training finished!') + + return trained_model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 From c1e7db31306665e5090c667d23e4158cacd2b5c3 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 8 Aug 2022 19:04:53 +0300 Subject: [PATCH 002/232] ReinforcementLearningModel --- .../ReinforcementLearning.py | 157 ++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py new file mode 100644 index 000000000..dded1ac3b --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -0,0 +1,157 @@ +import logging +from typing import Any, Tuple, Dict +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from pandas import DataFrame +import pandas as pd +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import numpy as np +import numpy.typing as npt +from freqtrade.freqai.freqai_interface import IFreqaiModel + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + train_df = data_dictionary["train_features"] + + sep = '/' + coin = pair.split(sep, 1)[0] + price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + price.reset_index(inplace=True, drop=True) + + model_name = 'ppo' + + env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + + agent_params = self.freqai_info['model_training_parameters'] + total_timesteps = agent_params.get('total_timesteps', 1000) + + agent = RLPrediction_agent(env_instance) + + model = agent.get_model(model_name, model_kwargs=agent_params) + trained_model = agent.train_model(model=model, + tb_log_name=model_name, + total_timesteps=total_timesteps) + print('Training finished!') + + return trained_model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 From 2f4d73eb068419eebaef685f2d11b7a3841880d9 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 8 Aug 2022 19:10:03 +0300 Subject: [PATCH 003/232] Revert "ReinforcementLearningModel" This reverts commit 4d8dfe1ff1daa47276eda77118ddf39c13512a85. --- .../ReinforcementLearning.py | 157 ------------------ 1 file changed, 157 deletions(-) delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py deleted file mode 100644 index dded1ac3b..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ /dev/null @@ -1,157 +0,0 @@ -import logging -from typing import Any, Tuple, Dict -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from pandas import DataFrame -import pandas as pd -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import numpy as np -import numpy.typing as npt -from freqtrade.freqai.freqai_interface import IFreqaiModel - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningModel(IFreqaiModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen - ) -> Any: - """ - Filter the training data and train a model to it. Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. 
- :param unfiltered_dataframe: Full dataframe for the current training period - :param metadata: pair metadata from strategy. - :returns: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info("--------------------Starting training " f"{pair} --------------------") - - # filter the features requested by user in the configuration file and elegantly handle NaNs - features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( - features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods - - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" - ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - - model = self.fit(data_dictionary, pair) - - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - - logger.info(f"--------------------done training {pair}--------------------") - - return model - - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - - train_df = data_dictionary["train_features"] - - sep = '/' - coin = pair.split(sep, 1)[0] - price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - price.reset_index(inplace=True, drop=True) - - model_name = 'ppo' - - env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) - - agent_params = self.freqai_info['model_training_parameters'] - total_timesteps = agent_params.get('total_timesteps', 1000) - - agent = RLPrediction_agent(env_instance) - - model = agent.get_model(model_name, model_kwargs=agent_params) - trained_model = agent.train_model(model=model, - tb_log_name=model_name, - total_timesteps=total_timesteps) - print('Training finished!') - - return trained_model - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False - ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :pred_df: dataframe containing the predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) - pred_df.fillna(0, inplace=True) - - return (pred_df, dk.do_predict) - - def rl_model_predict(self, dataframe: DataFrame, - dk: FreqaiDataKitchen, model: Any) -> DataFrame: - - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) - - def _predict(window): - observations = dataframe.iloc[window.index] - res, _ = model.predict(observations, deterministic=True) - return res - - output = output.rolling(window=self.CONV_WIDTH).apply(_predict) - - return output - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 From ec813434f5ab40094c489498fc6eff32aa5cc923 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 8 Aug 2022 19:12:49 +0300 Subject: [PATCH 004/232] ReinforcementLearningModel --- .../ReinforcementLearning.py | 157 ++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py new file mode 100644 index 000000000..dded1ac3b --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -0,0 +1,157 @@ +import logging +from typing import Any, Tuple, Dict +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from pandas import DataFrame +import pandas as pd +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import numpy as np +import numpy.typing as npt +from freqtrade.freqai.freqai_interface import IFreqaiModel + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + train_df = data_dictionary["train_features"] + + sep = '/' + coin = pair.split(sep, 1)[0] + price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + price.reset_index(inplace=True, drop=True) + + model_name = 'ppo' + + env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + + agent_params = self.freqai_info['model_training_parameters'] + total_timesteps = agent_params.get('total_timesteps', 1000) + + agent = RLPrediction_agent(env_instance) + + model = agent.get_model(model_name, model_kwargs=agent_params) + trained_model = agent.train_model(model=model, + tb_log_name=model_name, + total_timesteps=total_timesteps) + print('Training finished!') + + return trained_model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 From 8eeaab27467fa2e0bdc7314bdb888998bbb20af8 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Fri, 12 Aug 2022 20:25:13 +0300 Subject: [PATCH 005/232] add reward function --- .../RL/RLPrediction_agent.py | 89 +-- .../prediction_models/RL/RLPrediction_env.py | 615 +++++++++++++++--- .../ReinforcementLearning.py | 72 +- .../ReinforcementLearningModel.py | 157 ----- 4 files changed, 597 insertions(+), 336 deletions(-) delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningModel.py diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py index acea025c0..2e271bd02 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py @@ -1,17 +1,15 @@ # common library import numpy as np -from stable_baselines3 import A2C -from stable_baselines3 import DDPG -from stable_baselines3 import PPO -from stable_baselines3 import SAC -from stable_baselines3 import TD3 -from stable_baselines3.common.callbacks import BaseCallback -from stable_baselines3.common.noise import NormalActionNoise -from stable_baselines3.common.noise import OrnsteinUhlenbeckActionNoise -# from stable_baselines3.common.vec_env import DummyVecEnv +from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3 +from stable_baselines3.common.callbacks import BaseCallback, EvalCallback +from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise from freqtrade.freqai.prediction_models.RL import config + + +# from stable_baselines3.common.vec_env import DummyVecEnv + # 
from meta.env_stock_trading.env_stock_trading import StockTradingEnv # RL models from stable-baselines @@ -74,8 +72,10 @@ class RLPrediction_agent: policy="MlpPolicy", policy_kwargs=None, model_kwargs=None, + reward_kwargs=None, + #total_timesteps=None, verbose=1, - seed=None, + seed=None ): if model_name not in MODELS: raise NotImplementedError("NotImplementedError") @@ -95,68 +95,23 @@ class RLPrediction_agent: tensorboard_log=f"{config.TENSORBOARD_LOG_DIR}/{model_name}", verbose=verbose, policy_kwargs=policy_kwargs, - seed=seed, - **model_kwargs, + #model_kwargs=model_kwargs, + #total_timesteps=model_kwargs["total_timesteps"], + seed=seed + #**model_kwargs, ) + + + + return model - def train_model(self, model, tb_log_name, total_timesteps=5000): + def train_model(self, model, tb_log_name, model_kwargs): + model = model.learn( - total_timesteps=total_timesteps, + total_timesteps=model_kwargs["total_timesteps"], tb_log_name=tb_log_name, + #callback=eval_callback, callback=TensorboardCallback(), ) return model - - @staticmethod - def DRL_prediction(model, environment): - test_env, test_obs = environment.get_sb_env() - """make a prediction""" - account_memory = [] - actions_memory = [] - test_env.reset() - for i in range(len(environment.df.index.unique())): - action, _states = model.predict(test_obs) - # account_memory = test_env.env_method(method_name="save_asset_memory") - # actions_memory = test_env.env_method(method_name="save_action_memory") - test_obs, rewards, dones, info = test_env.step(action) - if i == (len(environment.df.index.unique()) - 2): - account_memory = test_env.env_method(method_name="save_asset_memory") - actions_memory = test_env.env_method(method_name="save_action_memory") - if dones[0]: - print("hit end!") - break - return account_memory[0], actions_memory[0] - - @staticmethod - def DRL_prediction_load_from_file(model_name, environment, cwd): - if model_name not in MODELS: - raise NotImplementedError("NotImplementedError") - try: - # load agent - model = MODELS[model_name].load(cwd) - print("Successfully load model", cwd) - except BaseException: - raise ValueError("Fail to load agent!") - - # test on the testing env - state = environment.reset() - episode_returns = list() # the cumulative_return / initial_account - episode_total_assets = list() - episode_total_assets.append(environment.initial_total_asset) - done = False - while not done: - action = model.predict(state)[0] - state, reward, done, _ = environment.step(action) - - total_asset = ( - environment.cash - + (environment.price_array[environment.time] * environment.stocks).sum() - ) - episode_total_assets.append(total_asset) - episode_return = total_asset / environment.initial_total_asset - episode_returns.append(episode_return) - - print("episode_return", episode_return) - print("Test Finished!") - return episode_total_assets diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py index 5fef7fbed..2bc7e868f 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py @@ -1,47 +1,82 @@ +import logging +import random +from collections import deque from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import gym -import matplotlib.pyplot as plt +import matplotlib.pylab as plt import numpy as np +import pandas as pd from gym import spaces from gym.utils import seeding +from sklearn.decomposition import PCA, KernelPCA 
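+
+# Usage sketch (illustrative only): the DEnv environment defined below is built
+# from the normalized feature dataframe and a prices dataframe with an `open`
+# column, e.g.
+#   env = DEnv(df=train_features, prices=prices,
+#              reward_kwargs={"rr": 1, "profit_aim": 0.025}, window_size=10)
+# where `reward_kwargs` must supply the `rr` and `profit_aim` keys read in
+# __init__, and `train_features`/`prices` are placeholders for the caller's data.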
+logger = logging.getLogger(__name__) + +# from bokeh.io import output_notebook +# from bokeh.plotting import figure, show +# from bokeh.models import ( +# CustomJS, +# ColumnDataSource, +# NumeralTickFormatter, +# Span, +# HoverTool, +# Range1d, +# DatetimeTickFormatter, +# Scatter, +# Label, LabelSet +# ) + class Actions(Enum): - Hold = 0 - Buy = 1 - Sell = 2 + Short = 0 + Long = 1 + Neutral = 2 + +class Actions_v2(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 class Positions(Enum): Short = 0 Long = 1 + Neutral = 0.5 def opposite(self): return Positions.Short if self == Positions.Long else Positions.Long +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 -class GymAnytrading(gym.Env): - """ - Based on https://github.com/AminHP/gym-anytrading - """ +class DEnv(gym.Env): metadata = {'render.modes': ['human']} - def __init__(self, signal_features, prices, window_size, fee=0.0): - assert signal_features.ndim == 2 + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 self.seed() - self.signal_features = signal_features + self.df = df + self.signal_features = self.df self.prices = prices self.window_size = window_size - self.fee = fee - self.shape = (window_size, self.signal_features.shape[1]) + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] - # spaces - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + self.fee=0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions_v2)) + self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) # episode self._start_tick = self.window_size @@ -49,29 +84,56 @@ class GymAnytrading(gym.Env): self._done = None self._current_tick = None self._last_trade_tick = None - self._position = None + self._position = Positions.Neutral self._position_history = None - self._total_reward = None + self.total_reward = None self._total_profit = None self._first_rendering = None self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] + def reset(self): + self._done = False + + if self.starting_point == True: + self._position_history = (self._start_tick* [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + self._current_tick = self._start_tick - self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Short - self._position_history = (self.window_size * [None]) + [self._position] - self._total_reward = 0. + self._last_trade_tick = None + #self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Neutral + + self.total_reward = 0. self._total_profit = 1. # unit self._first_rendering = True self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. 
+ + self.returns_report = [] + return self._get_observation() + def step(self, action): self._done = False self._current_tick += 1 @@ -79,34 +141,168 @@ class GymAnytrading(gym.Env): if self._current_tick == self._end_tick: self._done = True - step_reward = self._calculate_reward(action) - self._total_reward += step_reward + self.update_portfolio_log_returns(action) self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward - trade = False - if ((action == Actions.Buy.value and self._position == Positions.Short) or - (action == Actions.Sell.value and self._position == Positions.Long)): - trade = True - if trade: - self._position = self._position.opposite() + + + + trade_type = None + if self.is_tradesignal_v2(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + + temp_position = self._position + if action == Actions_v2.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Long_buy.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions_v2.Short_buy.value: + self._position = Positions.Short + trade_type = "short" + elif action == Actions_v2.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + else: + print("case not defined") + + # Update last trade tick self._last_trade_tick = self._current_tick + if trade_type != None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + self._position_history.append(self._position) observation = self._get_observation() info = dict( - total_reward=self._total_reward, - total_profit=self._total_profit, - position=self._position.value + tick = self._current_tick, + total_reward = self.total_reward, + total_profit = self._total_profit, + position = self._position.value ) self._update_history(info) return observation, step_reward, self._done, info + + def processState(self, state): + return state.to_numpy() + + def convert_mlp_Policy(self, obs_): + pass + def _get_observation(self): return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick == None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price)/last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price)/last_trade_price + else: + return 0. 
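+
+    # Worked example of the fee handling used above (illustrative numbers):
+    # with fee=0.0015, a long entered at an open price of 100 has an effective
+    # entry of add_buy_fee(100) = 100 * 1.0015 = 100.15; if the current open is
+    # 102, the effective exit is add_sell_fee(102) = 102 / 1.0015 ≈ 101.85, so
+    # get_unrealized_profit() returns (101.85 - 100.15) / 100.15 ≈ 0.017.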
+ + + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_tradesignal_v2(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or + + (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or + (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) + + + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def _is_trade_v2(self, action: Actions_v2): + return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Short) or + + (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) + ) + + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_hold_v2(self, action): + return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) + or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) + + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + def _update_history(self, info): if not self.history: self.history = {key: [] for key in info.keys()} @@ -114,7 +310,9 @@ class GymAnytrading(gym.Env): for key, value in info.items(): self.history[key].append(value) + def render(self, mode='human'): + def _plot_position(position, tick): color = None if position == Positions.Short: @@ -122,7 +320,7 @@ class GymAnytrading(gym.Env): elif position == Positions.Long: color = 'green' if color: - plt.scatter(tick, self.prices[tick], color=color) + plt.scatter(tick, self.prices.loc[tick].open, color=color) if self._first_rendering: self._first_rendering = False @@ -131,100 +329,319 @@ class GymAnytrading(gym.Env): 
start_position = self._position_history[self._start_tick] _plot_position(start_position, self._start_tick) + plt.cla() + plt.plot(self.prices) _plot_position(self._position, self._current_tick) - plt.suptitle( - "Total Reward: %.6f" % self._total_reward + ' ~ ' + - "Total Profit: %.6f" % self._total_profit - ) - + plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) plt.pause(0.01) - def render_all(self, mode='human'): + + def render_all(self): + plt.figure() window_ticks = np.arange(len(self._position_history)) - plt.plot(self.prices) + plt.plot(self.prices['open'], alpha=0.5) short_ticks = [] long_ticks = [] + neutral_ticks = [] for i, tick in enumerate(window_ticks): if self._position_history[i] == Positions.Short: - short_ticks.append(tick) + short_ticks.append(tick - 1) elif self._position_history[i] == Positions.Long: - long_ticks.append(tick) + long_ticks.append(tick - 1) + elif self._position_history[i] == Positions.Neutral: + neutral_ticks.append(tick - 1) - plt.plot(short_ticks, self.prices[short_ticks], 'ro') - plt.plot(long_ticks, self.prices[long_ticks], 'go') + plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + 'o', color='grey', ms=3, alpha=0.1) + plt.plot(short_ticks, self.prices.loc[short_ticks].open, + 'o', color='r', ms=3, alpha=0.8) + plt.plot(long_ticks, self.prices.loc[long_ticks].open, + 'o', color='g', ms=3, alpha=0.8) - plt.suptitle( - "Total Reward: %.6f" % self._total_reward + ' ~ ' + - "Total Profit: %.6f" % self._total_profit - ) + plt.suptitle("Generalising") + fig = plt.gcf() + fig.set_size_inches(15, 10) + + + + + def close_trade_report(self): + small_trade = 0 + positive_big_trade = 0 + negative_big_trade = 0 + small_profit = 0.003 + for i in self.close_trade_profit: + if i < small_profit and i > -small_profit: + small_trade+=1 + elif i > small_profit: + positive_big_trade += 1 + elif i < -small_profit: + negative_big_trade += 1 + print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + + + def report(self): + + # get total trade + long_trade = 0 + short_trade = 0 + neutral_trade = 0 + for trade in self.trade_history: + if trade['type'] == 'long': + long_trade += 1 + + elif trade['type'] == 'short': + short_trade += 1 + else: + neutral_trade += 1 + + negative_trade = 0 + positive_trade = 0 + for tr in self.close_trade_profit: + if tr < 0.: + negative_trade += 1 + + if tr > 0.: + positive_trade += 1 + + total_trade_lr = negative_trade+positive_trade + + + total_trade = long_trade + short_trade + sharp_ratio = self.sharpe_ratio() + sharp_log = self.get_sharpe_ratio() + + from tabulate import tabulate + + headers = ["Performance", ""] + performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + ["Total reward", "{0:.3f}".format(self.total_reward)], + ["Start profit(unit)", "{0:.2f}".format(1.)], + ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + ["Sharp log", "{0:.3f}".format(sharp_log)], + # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + ] + tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + print(tabulation) + + result = { + "Start": "{0:.2f}".format(1.), + "End": "{0:.2f}".format(self._total_profit), + "Sharp": "{0:.3f}".format(sharp_ratio), + "Winrate": 
"{0:.2f}".format(positive_trade*100/total_trade_lr) + } + return result def close(self): plt.close() + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + def save_rendering(self, filepath): plt.savefig(filepath) + def pause_rendering(self): plt.show() + def _calculate_reward(self, action): - step_reward = 0 + # rw = self.transaction_profit_reward(action) + #rw = self.reward_rr_profit_config(action) + rw = self.reward_rr_profit_config_v2(action) + return rw - trade = False - if ((action == Actions.Buy.value and self._position == Positions.Short) or - (action == Actions.Sell.value and self._position == Positions.Long)): - trade = True - - if trade: - current_price = self.prices[self._current_tick] - last_trade_price = self.prices[self._last_trade_tick] - price_diff = current_price - last_trade_price - - if self._position == Positions.Long: - step_reward += price_diff - - return step_reward def _update_profit(self, action): - trade = False - if ((action == Actions.Buy.value and self._position == Positions.Short) or - (action == Actions.Sell.value and self._position == Positions.Long)): - trade = True - - if trade or self._done: - current_price = self.prices[self._current_tick] - last_trade_price = self.prices[self._last_trade_tick] + #if self._is_trade(action) or self._done: + if self._is_trade_v2(action) or self._done: + pnl = self.get_unrealized_profit() if self._position == Positions.Long: - shares = (self._total_profit * (1 - self.fee)) / last_trade_price - self._total_profit = (shares * (1 - self.fee)) * current_price + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) - def max_possible_profit(self): - current_tick = self._start_tick - last_trade_tick = current_tick - 1 - profit = 1. + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) - while current_tick <= self._end_tick: - position = None - if self.prices[current_tick] < self.prices[current_tick - 1]: - while (current_tick <= self._end_tick and - self.prices[current_tick] < self.prices[current_tick - 1]): - current_tick += 1 - position = Positions.Short - else: - while (current_tick <= self._end_tick and - self.prices[current_tick] >= self.prices[current_tick - 1]): - current_tick += 1 - position = Positions.Long - if position == Positions.Long: - current_price = self.prices[current_tick - 1] - last_trade_price = self.prices[last_trade_tick] - shares = profit / last_trade_price - profit = shares * current_price - last_trade_tick = current_tick - 1 - print(profit) + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Short.value or action == Actions.Neutral.value: + if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_sell_fee(current_price) - return profit + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Long.value or action == Actions.Neutral.value: + if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + + def get_trading_log_return(self): + return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick-1].open + + + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + def get_bnh_log_return(self): + return np.diff(np.log(self.prices['open'][self._start_tick:])) + + + def transaction_profit_reward(self, action): + rw = 0. + + pt = self.prev_price() + pt_1 = self.current_price() + + + if self._position == Positions.Long: + a_t = 1 + elif self._position == Positions.Short: + a_t = -1 + else: + a_t = 0 + + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + # close short + elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + pt_1 = self.add_buy_fee(self.current_price()) + po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + else: + rw = a_t*(pt_1 - pt)/pt + + return np.clip(rw, 0, 1) + + + + def reward_rr_profit_config_v2(self, action): + rw = 0. 
+ + pt_1 = self.current_price() + + + if len(self.close_trade_profit) > 0: + # long + if self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Short_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Long_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + # short + if self._position == Positions.Short: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Long_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Short_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + return np.clip(rw, 0, 1) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index dded1ac3b..e208707eb 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -1,13 +1,19 @@ import logging -from typing import Any, Tuple, Dict -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from pandas import DataFrame -import pandas as pd -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from typing import Any, Dict, Tuple + import numpy as np import numpy.typing as npt +import pandas as pd +from pandas import DataFrame +from stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +#from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv +from 
freqtrade.persistence import Trade + logger = logging.getLogger(__name__) @@ -69,29 +75,69 @@ class ReinforcementLearningModel(IFreqaiModel): def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): train_df = data_dictionary["train_features"] + # train_labels = data_dictionary["train_labels"] + test_df = data_dictionary["test_features"] + # test_labels = data_dictionary["test_labels"] + + # sep = '/' + # coin = pair.split(sep, 1)[0] + # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + # price.reset_index(inplace=True, drop=True) + # price = price.to_frame() + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - sep = '/' - coin = pair.split(sep, 1)[0] - price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - price.reset_index(inplace=True, drop=True) model_name = 'ppo' - env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + #env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) agent_params = self.freqai_info['model_training_parameters'] - total_timesteps = agent_params.get('total_timesteps', 1000) + reward_params = self.freqai_info['model_reward_parameters'] + env_instance = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) agent = RLPrediction_agent(env_instance) + # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') + # eval_callback = EvalCallback(test_df, best_model_save_path='./models/', + # log_path='./logs/', eval_freq=10000, + # deterministic=True, render=False) + + # #Create the callback list + # callback = CallbackList([checkpoint_callback, eval_callback]) + model = agent.get_model(model_name, model_kwargs=agent_params) trained_model = agent.train_model(model=model, tb_log_name=model_name, - total_timesteps=total_timesteps) + model_kwargs=agent_params) + #eval_callback=callback) + + print('Training finished!') return trained_model + def get_state_info(self, pair): + open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) + market_side = 0.5 + current_profit = 0 + for trade in open_trades: + if trade.pair == pair: + current_value = trade.open_trade_value + openrate = trade.open_rate + if 'long' in trade.enter_tag: + market_side = 1 + else: + market_side = 0 + current_profit = current_value / openrate -1 + + total_profit = 0 + closed_trades = Trade.get_trades(trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) + for trade in closed_trades: + total_profit += trade.close_profit + + return market_side, current_profit, total_profit + + def predict( self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py b/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py deleted file mode 100644 index dded1ac3b..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningModel.py +++ /dev/null @@ -1,157 +0,0 @@ -import logging -from typing import Any, Tuple, Dict -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from pandas import DataFrame -import pandas as pd -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import numpy as np -import numpy.typing as npt -from freqtrade.freqai.freqai_interface import IFreqaiModel - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningModel(IFreqaiModel): - 
""" - User created Reinforcement Learning Model prediction model. - """ - - def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen - ) -> Any: - """ - Filter the training data and train a model to it. Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. - :param unfiltered_dataframe: Full dataframe for the current training period - :param metadata: pair metadata from strategy. - :returns: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info("--------------------Starting training " f"{pair} --------------------") - - # filter the features requested by user in the configuration file and elegantly handle NaNs - features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( - features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods - - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" - ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - - model = self.fit(data_dictionary, pair) - - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - - logger.info(f"--------------------done training {pair}--------------------") - - return model - - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - - train_df = data_dictionary["train_features"] - - sep = '/' - coin = pair.split(sep, 1)[0] - price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - price.reset_index(inplace=True, drop=True) - - model_name = 'ppo' - - env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) - - agent_params = self.freqai_info['model_training_parameters'] - total_timesteps = agent_params.get('total_timesteps', 1000) - - agent = RLPrediction_agent(env_instance) - - model = agent.get_model(model_name, model_kwargs=agent_params) - trained_model = agent.train_model(model=model, - tb_log_name=model_name, - total_timesteps=total_timesteps) - print('Training finished!') - - return trained_model - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False - ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :pred_df: dataframe containing the predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) - pred_df.fillna(0, inplace=True) - - return (pred_df, dk.do_predict) - - def rl_model_predict(self, dataframe: DataFrame, - dk: FreqaiDataKitchen, model: Any) -> DataFrame: - - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) - - def _predict(window): - observations = dataframe.iloc[window.index] - res, _ = model.predict(observations, deterministic=True) - return res - - output = output.rolling(window=self.CONV_WIDTH).apply(_predict) - - return output - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 From 01232e9a1f8e28e3611e38af3816edb026600767 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Sat, 13 Aug 2022 18:48:58 +0300 Subject: [PATCH 006/232] callback function and TDQN model added --- freqtrade/freqai/data_drawer.py | 4 +- .../RL/RLPrediction_agent_v2.py | 225 ++++++ .../RL/RLPrediction_env_v2.py | 645 ++++++++++++++++++ .../prediction_models/RLPredictionModel.py | 253 +++++++ 4 files changed, 1126 insertions(+), 1 deletion(-) create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py create mode 100644 freqtrade/freqai/prediction_models/RLPredictionModel.py diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 5282b4f59..f9d56c4b4 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -473,7 +473,9 @@ class FreqaiDataDrawer: model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") elif model_type == 'stable_baselines': from stable_baselines3.ppo.ppo import PPO - model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + from stable_baselines3 import DQN + #model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + model = DQN.load(dk.data_path / f"best_model.zip") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py new file mode 100644 index 000000000..e6a931e43 --- 
/dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py @@ -0,0 +1,225 @@ +import torch as th +from torch import nn +from typing import Dict, List, Tuple, Type, Optional, Any, Union +import gym +from stable_baselines3.common.type_aliases import GymEnv, Schedule +from stable_baselines3.common.torch_layers import ( + BaseFeaturesExtractor, + FlattenExtractor, + CombinedExtractor +) +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3 import DQN + + +from stable_baselines3.common.policies import BasePolicy +#from stable_baselines3.common.policies import register_policy +from stable_baselines3.dqn.policies import ( + QNetwork, DQNPolicy, MultiInputPolicy, + CnnPolicy, DQNPolicy, MlpPolicy) +import torch + + +def create_mlp_( + input_dim: int, + output_dim: int, + net_arch: List[int], + activation_fn: Type[nn.Module] = nn.ReLU, + squash_output: bool = False, +) -> List[nn.Module]: + dropout = 0.2 + if len(net_arch) > 0: + number_of_neural = net_arch[0] + + modules = [ + nn.Linear(input_dim, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, number_of_neural), + nn.BatchNorm1d(number_of_neural), + nn.LeakyReLU(), + nn.Dropout(dropout), + nn.Linear(number_of_neural, output_dim) + ] + return modules + +class TDQNetwork(QNetwork): + def __init__(self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + features_extractor: nn.Module, + features_dim: int, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + normalize_images: bool = True + ): + super().__init__( + observation_space=observation_space, + action_space=action_space, + features_extractor=features_extractor, + features_dim=features_dim, + net_arch=net_arch, + activation_fn=activation_fn, + normalize_images=normalize_images + ) + action_dim = self.action_space.n + q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) + self.q_net = nn.Sequential(*q_net).apply(self.init_weights) + + def init_weights(self, m): + if type(m) == nn.Linear: + torch.nn.init.kaiming_uniform_(m.weight) + + +class TDQNPolicy(DQNPolicy): + + def __init__( + self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + lr_schedule: Schedule, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + ): + super().__init__( + observation_space=observation_space, + action_space=action_space, + lr_schedule=lr_schedule, + net_arch=net_arch, + activation_fn=activation_fn, + features_extractor_class=features_extractor_class, + features_extractor_kwargs=features_extractor_kwargs, + normalize_images=normalize_images, + optimizer_class=optimizer_class, + optimizer_kwargs=optimizer_kwargs + ) + + @staticmethod + def init_weights(module: nn.Module, gain: float = 1) -> None: + """ + Orthogonal initialization (used in PPO and A2C) + """ + if isinstance(module, (nn.Linear, nn.Conv2d)): + 
nn.init.kaiming_uniform_(module.weight) + if module.bias is not None: + module.bias.data.fill_(0.0) + + def make_q_net(self) -> TDQNetwork: + # Make sure we always have separate networks for features extractors etc + net_args = self._update_features_extractor(self.net_args, features_extractor=None) + return TDQNetwork(**net_args).to(self.device) + + +class TMultiInputPolicy(TDQNPolicy): + def __init__( + self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + lr_schedule: Schedule, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + ): + super().__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + ) + + +class TDQN(DQN): + + policy_aliases: Dict[str, Type[BasePolicy]] = { + "MlpPolicy": MlpPolicy, + "CnnPolicy": CnnPolicy, + "TMultiInputPolicy": TMultiInputPolicy, + } + + def __init__( + self, + policy: Union[str, Type[TDQNPolicy]], + env: Union[GymEnv, str], + learning_rate: Union[float, Schedule] = 1e-4, + buffer_size: int = 1000000, # 1e6 + learning_starts: int = 50000, + batch_size: int = 32, + tau: float = 1.0, + gamma: float = 0.99, + train_freq: Union[int, Tuple[int, str]] = 4, + gradient_steps: int = 1, + replay_buffer_class: Optional[ReplayBuffer] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, + optimize_memory_usage: bool = False, + target_update_interval: int = 10000, + exploration_fraction: float = 0.1, + exploration_initial_eps: float = 1.0, + exploration_final_eps: float = 0.05, + max_grad_norm: float = 10, + tensorboard_log: Optional[str] = None, + create_eval_env: bool = False, + policy_kwargs: Optional[Dict[str, Any]] = None, + verbose: int = 1, + seed: Optional[int] = None, + device: Union[th.device, str] = "auto", + _init_setup_model: bool = True, + ): + + super().__init__( + policy=policy, + env=env, + learning_rate=learning_rate, + buffer_size=buffer_size, + learning_starts=learning_starts, + batch_size=batch_size, + tau=tau, + gamma=gamma, + train_freq=train_freq, + gradient_steps=gradient_steps, + replay_buffer_class=replay_buffer_class, # No action noise + replay_buffer_kwargs=replay_buffer_kwargs, + optimize_memory_usage=optimize_memory_usage, + target_update_interval=target_update_interval, + exploration_fraction=exploration_fraction, + exploration_initial_eps=exploration_initial_eps, + exploration_final_eps=exploration_final_eps, + max_grad_norm=max_grad_norm, + tensorboard_log=tensorboard_log, + create_eval_env=create_eval_env, + policy_kwargs=policy_kwargs, + verbose=verbose, + seed=seed, + device=device, + _init_setup_model=_init_setup_model + ) + + + +# try: +# register_policy("TMultiInputPolicy", TMultiInputPolicy) +# except: +# print("already registered") \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py new file mode 100644 index 000000000..ac91cd200 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py @@ -0,0 +1,645 @@ +import gym +from gym import spaces +from gym.utils import seeding +from enum import Enum 
+from sklearn.decomposition import PCA, KernelPCA +import random +import numpy as np +import pandas as pd +from collections import deque +import matplotlib.pylab as plt +from typing import Dict, List, Tuple, Type, Optional, Any, Union, Callable +import logging + +logger = logging.getLogger(__name__) + +# from bokeh.io import output_notebook +# from bokeh.plotting import figure, show +# from bokeh.models import ( +# CustomJS, +# ColumnDataSource, +# NumeralTickFormatter, +# Span, +# HoverTool, +# Range1d, +# DatetimeTickFormatter, +# Scatter, +# Label, LabelSet +# ) + +class Actions(Enum): + Short = 0 + Long = 1 + Neutral = 2 + +class Actions_v2(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + +class DEnv(gym.Env): + + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee=0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions_v2)) + self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + + def reset(self): + + self._done = False + + if self.starting_point == True: + self._position_history = (self._start_tick* [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + #self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. 
+ + self.returns_report = [] + + return self._get_observation() + + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + + + + + trade_type = None + if self.is_tradesignal_v2(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + + temp_position = self._position + if action == Actions_v2.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Long_buy.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions_v2.Short_buy.value: + self._position = Positions.Short + trade_type = "short" + elif action == Actions_v2.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions_v2.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type != None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick = self._current_tick, + total_reward = self.total_reward, + total_profit = self._total_profit, + position = self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + + def processState(self, state): + return state.to_numpy() + + def convert_mlp_Policy(self, obs_): + pass + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + + def get_unrealized_profit(self): + + if self._last_trade_tick == None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price)/last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price)/last_trade_price + else: + return 0. 
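For reviewers who want to poke at the environment interactively, a minimal sketch of constructing DEnv with random data follows. The constructor signature, the reward_kwargs keys ("rr", "profit_aim") and the module path are taken from this patch; the feature columns and price series are hypothetical, and the import assumes the RL package is importable as laid out here.

import numpy as np
import pandas as pd

from freqtrade.freqai.prediction_models.RL.RLPrediction_env_v2 import DEnv

n_candles, n_features, window = 500, 4, 10
# hypothetical feature matrix and open-price series
df = pd.DataFrame(np.random.randn(n_candles, n_features),
                  columns=[f"feat_{i}" for i in range(n_features)])
prices = pd.DataFrame({"open": 100 + np.cumsum(np.random.randn(n_candles))})

env = DEnv(df=df, prices=prices, window_size=window,
           reward_kwargs={"rr": 1, "profit_aim": 0.02})

obs = env.reset()
assert obs.shape == (window, n_features)   # matches self.shape defined above
assert env.action_space.n == 5             # one discrete action per Actions_v2 member

obs, reward, done, info = env.step(env.action_space.sample())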
+ + + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_tradesignal_v2(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or + + (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or + (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) + + + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def _is_trade_v2(self, action: Actions_v2): + return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or + (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Long) or + (action == Actions_v2.Neutral.value and self._position == Positions.Short) or + + (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) + ) + + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def is_hold_v2(self, action): + return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) + or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) + + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + + def render(self, mode='human'): + + def _plot_position(position, tick): + color = None + if position == Positions.Short: + color = 'red' + elif position == Positions.Long: + color = 'green' + if color: + plt.scatter(tick, self.prices.loc[tick].open, color=color) + + if self._first_rendering: + self._first_rendering = False + plt.cla() + plt.plot(self.prices) + start_position = self._position_history[self._start_tick] + _plot_position(start_position, self._start_tick) + + plt.cla() + 
plt.plot(self.prices) + _plot_position(self._position, self._current_tick) + + plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) + plt.pause(0.01) + + + def render_all(self): + plt.figure() + window_ticks = np.arange(len(self._position_history)) + plt.plot(self.prices['open'], alpha=0.5) + + short_ticks = [] + long_ticks = [] + neutral_ticks = [] + for i, tick in enumerate(window_ticks): + if self._position_history[i] == Positions.Short: + short_ticks.append(tick - 1) + elif self._position_history[i] == Positions.Long: + long_ticks.append(tick - 1) + elif self._position_history[i] == Positions.Neutral: + neutral_ticks.append(tick - 1) + + plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + 'o', color='grey', ms=3, alpha=0.1) + plt.plot(short_ticks, self.prices.loc[short_ticks].open, + 'o', color='r', ms=3, alpha=0.8) + plt.plot(long_ticks, self.prices.loc[long_ticks].open, + 'o', color='g', ms=3, alpha=0.8) + + plt.suptitle("Generalising") + fig = plt.gcf() + fig.set_size_inches(15, 10) + + + + + def close_trade_report(self): + small_trade = 0 + positive_big_trade = 0 + negative_big_trade = 0 + small_profit = 0.003 + for i in self.close_trade_profit: + if i < small_profit and i > -small_profit: + small_trade+=1 + elif i > small_profit: + positive_big_trade += 1 + elif i < -small_profit: + negative_big_trade += 1 + print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + + + def report(self): + + # get total trade + long_trade = 0 + short_trade = 0 + neutral_trade = 0 + for trade in self.trade_history: + if trade['type'] == 'long': + long_trade += 1 + + elif trade['type'] == 'short': + short_trade += 1 + else: + neutral_trade += 1 + + negative_trade = 0 + positive_trade = 0 + for tr in self.close_trade_profit: + if tr < 0.: + negative_trade += 1 + + if tr > 0.: + positive_trade += 1 + + total_trade_lr = negative_trade+positive_trade + + + total_trade = long_trade + short_trade + sharp_ratio = self.sharpe_ratio() + sharp_log = self.get_sharpe_ratio() + + from tabulate import tabulate + + headers = ["Performance", ""] + performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + ["Total reward", "{0:.3f}".format(self.total_reward)], + ["Start profit(unit)", "{0:.2f}".format(1.)], + ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + ["Sharp log", "{0:.3f}".format(sharp_log)], + # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + ] + tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + print(tabulation) + + result = { + "Start": "{0:.2f}".format(1.), + "End": "{0:.2f}".format(self._total_profit), + "Sharp": "{0:.3f}".format(sharp_ratio), + "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) + } + return result + + def close(self): + plt.close() + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + + def save_rendering(self, filepath): + plt.savefig(filepath) + + + def pause_rendering(self): + plt.show() + + + def _calculate_reward(self, action): + # rw = self.transaction_profit_reward(action) + #rw = self.reward_rr_profit_config(action) + rw = self.reward_rr_profit_config_v2(action) + return rw + + + def _update_profit(self, action): + #if 
self._is_trade(action) or self._done: + if self._is_trade_v2(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. + """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Short.value or action == Actions.Neutral.value: + if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + #if action == Actions.Long.value or action == Actions.Neutral.value: + if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + + def get_trading_log_return(self): + return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick-1].open + + + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + def get_bnh_log_return(self): + return np.diff(np.log(self.prices['open'][self._start_tick:])) + + + def transaction_profit_reward(self, action): + rw = 0. 
+ + pt = self.prev_price() + pt_1 = self.current_price() + + + if self._position == Positions.Long: + a_t = 1 + elif self._position == Positions.Short: + a_t = -1 + else: + a_t = 0 + + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + # close short + elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + pt_1 = self.add_buy_fee(self.current_price()) + po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + else: + rw = a_t*(pt_1 - pt)/pt + + return np.clip(rw, 0, 1) + + + + def reward_rr_profit_config_v2(self, action): + rw = 0. + + pt_1 = self.current_price() + + + if len(self.close_trade_profit) > 0: + # long + if self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Short_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Long_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + # short + if self._position == Positions.Short: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + if action == Actions_v2.Long_buy.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 2 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 1 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Short_sell.value: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + rw = 10 * 5 + elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 1 * 3 + elif self.close_trade_profit[-1] < 0: + rw = 10 * -1 + elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = 10 * 3 * -1 + + if action == Actions_v2.Neutral.value: + if self.close_trade_profit[-1] > 0: + rw = 2 + elif self.close_trade_profit[-1] < 0: + rw = 2 * -1 + + return np.clip(rw, 0, 1) \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RLPredictionModel.py b/freqtrade/freqai/prediction_models/RLPredictionModel.py new file mode 100644 index 000000000..b6903dd43 --- /dev/null +++ b/freqtrade/freqai/prediction_models/RLPredictionModel.py @@ -0,0 +1,253 @@ +import logging +from typing import Any, Dict, Tuple +#from matplotlib.colors import 
DivergingNorm + +from pandas import DataFrame +import pandas as pd +from freqtrade.exceptions import OperationalException +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +import tensorflow as tf +from freqtrade.freqai.prediction_models.BaseTensorFlowModel import BaseTensorFlowModel +from freqtrade.freqai.freqai_interface import IFreqaiModel +from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout +from tensorflow.keras.models import Model +import numpy as np +import copy + +from keras.layers import * +import random + + +logger = logging.getLogger(__name__) + +# tf.config.run_functions_eagerly(True) +# tf.data.experimental.enable_debug_mode() + +import os +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + +MAX_EPOCHS = 10 +LOOKBACK = 8 + + +class RLPredictionModel_v2(IFreqaiModel): + """ + User created prediction model. The class needs to override three necessary + functions, predict(), fit(). + """ + + def fit(self, data_dictionary: Dict, pair) -> Any: + """ + User sets up the training and test data to fit their desired model here + :params: + :data_dictionary: the dictionary constructed by DataHandler to hold + all the training and test data/labels. + """ + + train_df = data_dictionary["train_features"] + train_labels = data_dictionary["train_labels"] + test_df = data_dictionary["test_features"] + test_labels = data_dictionary["test_labels"] + n_labels = len(train_labels.columns) + if n_labels > 1: + raise OperationalException( + "Neural Net not yet configured for multi-targets. Please " + " reduce number of targets to 1 in strategy." + ) + + n_features = len(data_dictionary["train_features"].columns) + BATCH_SIZE = self.freqai_info.get("batch_size", 64) + input_dims = [BATCH_SIZE, self.CONV_WIDTH, n_features] + + + w1 = WindowGenerator( + input_width=self.CONV_WIDTH, + label_width=1, + shift=1, + train_df=train_df, + val_df=test_df, + train_labels=train_labels, + val_labels=test_labels, + batch_size=BATCH_SIZE, + ) + + + # train_agent() + #pair = self.dd.historical_data[pair] + #gym_env = FreqtradeEnv(data=train_df, prices=0.01, windows_size=100, pair=pair, stake_amount=100) + + # sep = '/' + # coin = pair.split(sep, 1)[0] + + # # df1 = train_df.filter(regex='price') + # # df2 = df1.filter(regex='raw') + + # # df3 = df2.filter(regex=f"{coin}") + # # print(df3) + + # price = train_df[f"%-{coin}raw_price_5m"] + # gym_env = RLPrediction_GymAnytrading(signal_features=train_df, prices=price, window_size=100) + # sac = RLPrediction_Agent(gym_env) + + # print(sac) + + # return 0 + + + + return model + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first=True + ) -> Tuple[DataFrame, DataFrame]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :predictions: np.array of predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + if first: + full_df = dk.data_dictionary["prediction_features"] + + w1 = WindowGenerator( + input_width=self.CONV_WIDTH, + label_width=1, + shift=1, + test_df=full_df, + batch_size=len(full_df), + ) + + predictions = self.model.predict(w1.inference) + len_diff = len(dk.do_predict) - len(predictions) + if len_diff > 0: + dk.do_predict = dk.do_predict[len_diff:] + + else: + data = dk.data_dictionary["prediction_features"] + data = tf.expand_dims(data, axis=0) + predictions = self.model(data, training=False) + + predictions = predictions[:, 0] + pred_df = DataFrame(predictions, columns=dk.label_list) + + pred_df = dk.denormalize_labels_from_metadata(pred_df) + + return (pred_df, np.ones(len(pred_df))) + + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pass + # w1 = WindowGenerator( + # input_width=self.CONV_WIDTH, label_width=1, shift=1, test_df=df, batch_size=len(df) + # ) + + # trained_predictions = model.predict(w1.inference) + # #trained_predictions = trained_predictions[:, 0, 0] + # trained_predictions = trained_predictions[:, 0] + + # n_lost_points = len(df) - len(trained_predictions) + # pred_df = DataFrame(trained_predictions, columns=dk.label_list) + # zeros_df = DataFrame(np.zeros((n_lost_points, len(dk.label_list))), columns=dk.label_list) + # pred_df = pd.concat([zeros_df, pred_df], axis=0) + + # pred_df = dk.denormalize_labels_from_metadata(pred_df) + + + + # self.dd.historic_predictions[pair] = DataFrame() + # self.dd.historic_predictions[pair] = copy.deepcopy(pred_df) + + +class WindowGenerator: + def __init__( + self, + input_width, + label_width, + shift, + train_df=None, + val_df=None, + test_df=None, + train_labels=None, + val_labels=None, + test_labels=None, + batch_size=None, + ): + # Store the raw data. 
+ self.train_df = train_df + self.val_df = val_df + self.test_df = test_df + self.train_labels = train_labels + self.val_labels = val_labels + self.test_labels = test_labels + self.batch_size = batch_size + self.input_width = input_width + self.label_width = label_width + self.shift = shift + + self.total_window_size = input_width + shift + + self.input_slice = slice(0, input_width) + self.input_indices = np.arange(self.total_window_size)[self.input_slice] + + def make_dataset(self, data, labels=None): + data = np.array(data, dtype=np.float32) + if labels is not None: + labels = np.array(labels, dtype=np.float32) + ds = tf.keras.preprocessing.timeseries_dataset_from_array( + data=data, + targets=labels, + sequence_length=self.total_window_size, + sequence_stride=1, + sampling_rate=1, + shuffle=False, + batch_size=self.batch_size, + ) + + return ds + + @property + def train(self): + + + + return self.make_dataset(self.train_df, self.train_labels) + + @property + def val(self): + return self.make_dataset(self.val_df, self.val_labels) + + @property + def test(self): + return self.make_dataset(self.test_df, self.test_labels) + + @property + def inference(self): + return self.make_dataset(self.test_df) + + @property + def example(self): + """Get and cache an example batch of `inputs, labels` for plotting.""" + result = getattr(self, "_example", None) + if result is None: + # No example batch was found, so get one from the `.train` dataset + result = next(iter(self.train)) + # And cache it for next time + self._example = result + return result \ No newline at end of file From cd3fe44424357d80e1e944131298a332c6afb30d Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Sat, 13 Aug 2022 20:05:21 +0300 Subject: [PATCH 007/232] callback function and TDQN model added --- .../ReinforcementLearning.py | 143 ++++++++++++++---- 1 file changed, 116 insertions(+), 27 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index e208707eb..5783baba8 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -5,14 +5,23 @@ import numpy as np import numpy.typing as npt import pandas as pd from pandas import DataFrame -from stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN #from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv from freqtrade.persistence import Trade +from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.monitor import Monitor + +import torch as th +from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, StopTrainingOnRewardThreshold +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3 import PPO + + logger = logging.getLogger(__name__) @@ -74,47 +83,127 @@ class ReinforcementLearningModel(IFreqaiModel): def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - train_df = data_dictionary["train_features"] - # train_labels = data_dictionary["train_labels"] - test_df = data_dictionary["test_features"] - 
# test_labels = data_dictionary["test_labels"] + # train_df = data_dictionary["train_features"] + # # train_labels = data_dictionary["train_labels"] + # test_df = data_dictionary["test_features"] + # # test_labels = data_dictionary["test_labels"] - # sep = '/' - # coin = pair.split(sep, 1)[0] - # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - # price.reset_index(inplace=True, drop=True) - # price = price.to_frame() - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + # # sep = '/' + # # coin = pair.split(sep, 1)[0] + # # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] + # # price.reset_index(inplace=True, drop=True) + # # price = price.to_frame() + # price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) + + # #train_env = GymAnytrading(train_df, price, self.CONV_WIDTH) + + # agent_params = self.freqai_info['model_training_parameters'] + # reward_params = self.freqai_info['model_reward_parameters'] - model_name = 'ppo' + # train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + # #eval_env = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + + # #env_instance = SubprocVecEnv([DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params)]) + # #train_env.reset() + # #eval_env.reset() + + # # model + + # #policy_kwargs = dict(net_arch=[512, 512, 512]) + # policy_kwargs = dict(activation_fn=th.nn.Tanh, + # net_arch=[256, 256, 256]) + # agent = RLPrediction_agent(train_env) + # #eval_agent = RLPrediction_agent(eval_env) - #env_instance = GymAnytrading(train_df, price, self.CONV_WIDTH) + # # PPO + # model_name = 'ppo' + # model = agent.get_model(model_name, model_kwargs=agent_params, policy_kwargs=policy_kwargs) + # trained_model = agent.train_model(model=model, + # tb_log_name=model_name, + # model_kwargs=agent_params, + # train_df=train_df, + # test_df=test_df, + # price=price, + # price_test=price_test, + # window_size=self.CONV_WIDTH) + + + # # best_model = eval_agent.train_model(model=model, + # # tb_log_name=model_name, + # # model_kwargs=agent_params, + # # eval=eval_env) + + + # # TDQN + # # model_name = 'TDQN' + # # model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log='./tensorboard_log/', + # # learning_rate=agent_params["learning_rate"], gamma=0.9, + # # target_update_interval=5000, buffer_size=50000, + # # exploration_initial_eps=1, exploration_final_eps=0.1, + # # replay_buffer_class=ReplayBuffer + # # ) + + # # trained_model = agent.train_model(model=model, + # # tb_log_name=model_name, + # # model_kwargs=agent_params) + # #model.learn( + # # total_timesteps=5000, + # # callback=callback + # # ) agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] - env_instance = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - agent = RLPrediction_agent(env_instance) + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) + + # environments 
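The wiring that follows, a training DEnv, a Monitor-wrapped evaluation DEnv, and an EvalCallback handed to model.learn(), is the standard stable-baselines3 setup. A minimal, self-contained sketch of that pattern, using gym's CartPole-v1 as a stand-in for DEnv and purely illustrative paths:

import gym
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.callbacks import EvalCallback

# stand-in environments; fit() below uses DEnv built from freqai features and prices
demo_train_env = gym.make("CartPole-v1")
demo_eval_env = Monitor(gym.make("CartPole-v1"))

# evaluate every eval_freq steps and keep the best checkpoint on disk
eval_callback = EvalCallback(demo_eval_env, best_model_save_path="./best_model/",
                             log_path="./logs/", eval_freq=500,
                             deterministic=True, render=False)

model = PPO("MlpPolicy", demo_train_env, verbose=0)
model.learn(total_timesteps=10_000, callback=eval_callback)

The code below applies the same pattern to DEnv instances built from the freqai feature and price frames, with TDQN offered as an alternative agent to PPO.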
+ train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval_env = Monitor(eval, ".") + eval_env.reset() - # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') - # eval_callback = EvalCallback(test_df, best_model_save_path='./models/', - # log_path='./logs/', eval_freq=10000, - # deterministic=True, render=False) + # this should be in config - TODO + agent_type = 'tdqn' - # #Create the callback list - # callback = CallbackList([checkpoint_callback, eval_callback]) + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/{agent_type}/logs/", eval_freq=10000, + deterministic=True, render=False) - model = agent.get_model(model_name, model_kwargs=agent_params) - trained_model = agent.train_model(model=model, - tb_log_name=model_name, - model_kwargs=agent_params) - #eval_callback=callback) + # model arch + policy_kwargs = dict(activation_fn=th.nn.Tanh, + net_arch=[512, 512, 512]) + + + if agent_type == 'tdqn': + model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", + learning_rate=0.00025, gamma=0.9, + target_update_interval=5000, buffer_size=50000, + exploration_initial_eps=1, exploration_final_eps=0.1, + replay_buffer_class=ReplayBuffer + ) + elif agent_type == 'ppo': + model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", + learning_rate=0.00025, gamma=0.9 + ) + + model.learn( + total_timesteps=agent_params["total_timesteps"], + callback=eval_callback + ) print('Training finished!') - return trained_model + return model + + def get_state_info(self, pair): open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) From 9b895500b306cf04c608931ae22e244901ba4fdf Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Sun, 14 Aug 2022 16:24:20 +0300 Subject: [PATCH 008/232] initial commit - new dev branch --- .../RL/RLPrediction_agent.py | 30 +- ...agent_v2.py => RLPrediction_agent_TDQN.py} | 59 +- .../RL/RLPrediction_env_TDQN_3ac.py | 513 ++++++++++++++ ...on_env.py => RLPrediction_env_TDQN_5ac.py} | 416 +++++------ .../RL/RLPrediction_env_v2.py | 645 ------------------ .../ReinforcementLearning.py | 67 +- 6 files changed, 810 insertions(+), 920 deletions(-) rename freqtrade/freqai/prediction_models/RL/{RLPrediction_agent_v2.py => RLPrediction_agent_TDQN.py} (93%) create mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py rename freqtrade/freqai/prediction_models/RL/{RLPrediction_env.py => RLPrediction_env_TDQN_5ac.py} (55%) delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py index 2e271bd02..26b31f6e9 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py @@ -1,11 +1,15 @@ # common library +import gym import numpy as np from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3 -from stable_baselines3.common.callbacks import BaseCallback, EvalCallback +from stable_baselines3.common.callbacks import (BaseCallback, CallbackList, CheckpointCallback, + EvalCallback, StopTrainingOnRewardThreshold) from stable_baselines3.common.noise import 
NormalActionNoise, OrnsteinUhlenbeckActionNoise from freqtrade.freqai.prediction_models.RL import config +#from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN +from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv # from stable_baselines3.common.vec_env import DummyVecEnv @@ -106,12 +110,30 @@ class RLPrediction_agent: return model - def train_model(self, model, tb_log_name, model_kwargs): + def train_model(self, model, tb_log_name, model_kwargs, train_df, test_df, price, price_test, window_size): + + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_env = DEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params) + eval_env = DEnv(df=test_df, prices=price_test, window_size=window_size, reward_kwargs=reward_params) + + # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/', + # name_prefix='rl_model') + + checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') + + eval_callback = EvalCallback(eval_env, best_model_save_path='./logs/best_model', log_path='./logs/results', eval_freq=500) + #callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=1) + + # Create the callback list + callback = CallbackList([checkpoint_callback, eval_callback]) + model = model.learn( total_timesteps=model_kwargs["total_timesteps"], tb_log_name=tb_log_name, - #callback=eval_callback, - callback=TensorboardCallback(), + callback=callback, + #callback=TensorboardCallback(), ) return model diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py similarity index 93% rename from freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py rename to freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py index e6a931e43..0aa3512a1 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_v2.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py @@ -1,23 +1,18 @@ -import torch as th -from torch import nn -from typing import Dict, List, Tuple, Type, Optional, Any, Union +from typing import Any, Dict, List, Optional, Tuple, Type, Union + import gym -from stable_baselines3.common.type_aliases import GymEnv, Schedule -from stable_baselines3.common.torch_layers import ( - BaseFeaturesExtractor, - FlattenExtractor, - CombinedExtractor -) -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3 import DQN - - -from stable_baselines3.common.policies import BasePolicy -#from stable_baselines3.common.policies import register_policy -from stable_baselines3.dqn.policies import ( - QNetwork, DQNPolicy, MultiInputPolicy, - CnnPolicy, DQNPolicy, MlpPolicy) import torch +import torch as th +from stable_baselines3 import DQN +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3.common.policies import BasePolicy +from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, CombinedExtractor, + FlattenExtractor) +from stable_baselines3.common.type_aliases import GymEnv, Schedule +#from stable_baselines3.common.policies import register_policy +from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy, + QNetwork) +from torch import nn def create_mlp_( @@ -30,7 +25,7 @@ def create_mlp_( dropout = 0.2 if len(net_arch) > 0: number_of_neural = net_arch[0] - + modules = [ nn.Linear(input_dim, 
number_of_neural), nn.BatchNorm1d(number_of_neural), @@ -69,19 +64,19 @@ class TDQNetwork(QNetwork): features_dim=features_dim, net_arch=net_arch, activation_fn=activation_fn, - normalize_images=normalize_images + normalize_images=normalize_images ) action_dim = self.action_space.n q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) self.q_net = nn.Sequential(*q_net).apply(self.init_weights) - + def init_weights(self, m): if type(m) == nn.Linear: torch.nn.init.kaiming_uniform_(m.weight) - - + + class TDQNPolicy(DQNPolicy): - + def __init__( self, observation_space: gym.spaces.Space, @@ -107,7 +102,7 @@ class TDQNPolicy(DQNPolicy): optimizer_class=optimizer_class, optimizer_kwargs=optimizer_kwargs ) - + @staticmethod def init_weights(module: nn.Module, gain: float = 1) -> None: """ @@ -117,13 +112,13 @@ class TDQNPolicy(DQNPolicy): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: module.bias.data.fill_(0.0) - + def make_q_net(self) -> TDQNetwork: # Make sure we always have separate networks for features extractors etc net_args = self._update_features_extractor(self.net_args, features_extractor=None) return TDQNetwork(**net_args).to(self.device) - + class TMultiInputPolicy(TDQNPolicy): def __init__( self, @@ -150,8 +145,8 @@ class TMultiInputPolicy(TDQNPolicy): optimizer_class, optimizer_kwargs, ) - - + + class TDQN(DQN): policy_aliases: Dict[str, Type[BasePolicy]] = { @@ -216,10 +211,10 @@ class TDQN(DQN): device=device, _init_setup_model=_init_setup_model ) - - + + # try: # register_policy("TMultiInputPolicy", TMultiInputPolicy) # except: -# print("already registered") \ No newline at end of file +# print("already registered") diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py new file mode 100644 index 000000000..184ec57ec --- /dev/null +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py @@ -0,0 +1,513 @@ +import logging +import random +from collections import deque +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + +import gym +import matplotlib.pylab as plt +import numpy as np +import pandas as pd +from gym import spaces +from gym.utils import seeding + +logger = logging.getLogger(__name__) + +class Actions(Enum): + Short = 0 + Long = 1 + Neutral = 2 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + +class DEnv(gym.Env): + + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee=0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = 
Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point == True: + self._position_history = (self._start_tick* [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + #self._last_trade_tick = self._current_tick - 1 + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + temp_position = self._position + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type != None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick = self._current_tick, + total_reward = self.total_reward, + total_profit = self._total_profit, + position = self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + # def processState(self, state): + # return state.to_numpy() + + # def convert_mlp_Policy(self, obs_): + # pass + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick == None: + return 0. + + if self._position == Positions.Neutral: + return 0. 
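        # For reference, the branching in step() above reduces to this
        # (position, action) -> new position table for the three-action space,
        # with same-position actions filtered out beforehand by is_tradesignal():
        #   (Neutral, Long)    -> Long     open long
        #   (Neutral, Short)   -> Short    open short
        #   (Long,    Neutral) -> Neutral  close long
        #   (Long,    Short)   -> Short    close long and open short
        #   (Short,   Neutral) -> Neutral  close short
        #   (Short,   Long)    -> Long     close short and open long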
+ elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price)/last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price)/last_trade_price + else: + return 0. + + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + + # def render(self, mode='human'): + # def _plot_position(position, tick): + # color = None + # if position == Positions.Short: + # color = 'red' + # elif position == Positions.Long: + # color = 'green' + # if color: + # plt.scatter(tick, self.prices.loc[tick].open, color=color) + # if self._first_rendering: + # self._first_rendering = False + # plt.cla() + # plt.plot(self.prices) + # start_position = self._position_history[self._start_tick] + # _plot_position(start_position, self._start_tick) + # plt.cla() + # plt.plot(self.prices) + # _plot_position(self._position, self._current_tick) + # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) + # plt.pause(0.01) + + # def render_all(self): + # plt.figure() + # window_ticks = np.arange(len(self._position_history)) + # plt.plot(self.prices['open'], alpha=0.5) + # short_ticks = [] + # long_ticks = [] + # neutral_ticks = [] + # for i, tick in enumerate(window_ticks): + # if self._position_history[i] == Positions.Short: + # short_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Long: + # long_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Neutral: + # neutral_ticks.append(tick - 1) + # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + # 'o', color='grey', ms=3, alpha=0.1) + # plt.plot(short_ticks, self.prices.loc[short_ticks].open, + # 'o', color='r', ms=3, alpha=0.8) + # plt.plot(long_ticks, self.prices.loc[long_ticks].open, + # 'o', color='g', ms=3, alpha=0.8) + # plt.suptitle("Generalising") + # fig = plt.gcf() + # fig.set_size_inches(15, 10) + + # def close_trade_report(self): + # small_trade 
= 0 + # positive_big_trade = 0 + # negative_big_trade = 0 + # small_profit = 0.003 + # for i in self.close_trade_profit: + # if i < small_profit and i > -small_profit: + # small_trade+=1 + # elif i > small_profit: + # positive_big_trade += 1 + # elif i < -small_profit: + # negative_big_trade += 1 + # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + + # def report(self): + # # get total trade + # long_trade = 0 + # short_trade = 0 + # neutral_trade = 0 + # for trade in self.trade_history: + # if trade['type'] == 'long': + # long_trade += 1 + # elif trade['type'] == 'short': + # short_trade += 1 + # else: + # neutral_trade += 1 + # negative_trade = 0 + # positive_trade = 0 + # for tr in self.close_trade_profit: + # if tr < 0.: + # negative_trade += 1 + # if tr > 0.: + # positive_trade += 1 + # total_trade_lr = negative_trade+positive_trade + # total_trade = long_trade + short_trade + # sharp_ratio = self.sharpe_ratio() + # sharp_log = self.get_sharpe_ratio() + # from tabulate import tabulate + # headers = ["Performance", ""] + # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + # ["Total reward", "{0:.3f}".format(self.total_reward)], + # ["Start profit(unit)", "{0:.2f}".format(1.)], + # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + # ["Sharp log", "{0:.3f}".format(sharp_log)], + # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + # ] + # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + # print(tabulation) + # result = { + # "Start": "{0:.2f}".format(1.), + # "End": "{0:.2f}".format(self._total_profit), + # "Sharp": "{0:.3f}".format(sharp_ratio), + # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) + # } + # return result + + # def close(self): + # plt.close() + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + # def save_rendering(self, filepath): + # plt.savefig(filepath) + + # def pause_rendering(self): + # plt.show() + + def _calculate_reward(self, action): + # rw = self.transaction_profit_reward(action) + #rw = self.reward_rr_profit_config(action) + rw = self.profit_only_when_close_reward(action) + #rw = self.profit_only_when_close_reward_aim(action) + return rw + + def _update_profit(self, action): + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit*pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Short.value or action == Actions.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Long.value or action == Actions.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + # def get_trading_log_return(self): + # return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick-1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + # def get_bnh_log_return(self): + # return np.diff(np.log(self.prices['open'][self._start_tick:])) + + def transaction_profit_reward(self, action): + rw = 0. + + pt = self.prev_price() + pt_1 = self.current_price() + + + if self._position == Positions.Long: + a_t = 1 + elif self._position == Positions.Short: + a_t = -1 + else: + a_t = 0 + + # close long + if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: + pt_1 = self.add_sell_fee(self.current_price()) + po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + # close short + elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: + pt_1 = self.add_buy_fee(self.current_price()) + po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + rw = a_t*(pt_1 - po)/po + #rw = rw*2 + else: + rw = a_t*(pt_1 - pt)/pt + + return np.clip(rw, 0, 1) + + def profit_only_when_close_reward_aim(self, action): + + if self._last_trade_tick == None: + return 0. 
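        # Note: step() calls _update_profit() before _calculate_reward(), so by the time a
        # closing action reaches this function, close_trade_profit[-1] already holds the pnl
        # of the trade being closed; that value is what gets compared to profit_aim * rr below.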
+
+        # close long
+        if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long:
+            last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open)
+            current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open)
+            # double the reward when the closed trade beat the profit aim scaled by the risk/reward ratio
+            if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr:
+                return float((np.log(current_price) - np.log(last_trade_price)) * 2)
+            return float(np.log(current_price) - np.log(last_trade_price))
+
+        # close short
+        if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short:
+            last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open)
+            current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open)
+            if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr:
+                return float((np.log(last_trade_price) - np.log(current_price)) * 2)
+            return float(np.log(last_trade_price) - np.log(current_price))
+
+        return 0.
+
+    def profit_only_when_close_reward(self, action):
+
+        if self._last_trade_tick is None:
+            return 0.
+
+        # close long
+        if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long:
+            last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open)
+            current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open)
+            return float(np.log(current_price) - np.log(last_trade_price))
+
+        # close short
+        if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short:
+            last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open)
+            current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open)
+            return float(np.log(last_trade_price) - np.log(current_price))
+
+        return 0.
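As a quick numeric check of the close-trade reward above: with the 0.0015 fee used by DEnv and assumed candle opens of 100.0 at entry and 102.0 at exit for a long trade, the reward is the fee-adjusted log return, roughly 0.0168. A minimal sketch, with the price values being illustrative assumptions:

import numpy as np

fee = 0.0015                           # fee hardcoded in DEnv.__init__
entry_open, exit_open = 100.0, 102.0   # assumed candle opens for a long trade

last_trade_price = entry_open * (1 + fee)   # add_buy_fee() applied at entry
current_price = exit_open / (1 + fee)       # add_sell_fee() applied at exit

reward = float(np.log(current_price) - np.log(last_trade_price))
print(round(reward, 4))   # ~0.0168: a 2% move minus roughly twice the fee, in log terms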
\ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py similarity index 55% rename from freqtrade/freqai/prediction_models/RL/RLPrediction_env.py rename to freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py index 2bc7e868f..9b01579e8 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env.py +++ b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py @@ -2,6 +2,7 @@ import logging import random from collections import deque from enum import Enum +#from sklearn.decomposition import PCA, KernelPCA from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import gym @@ -10,7 +11,6 @@ import numpy as np import pandas as pd from gym import spaces from gym.utils import seeding -from sklearn.decomposition import PCA, KernelPCA logger = logging.getLogger(__name__) @@ -29,12 +29,8 @@ logger = logging.getLogger(__name__) # Label, LabelSet # ) -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 -class Actions_v2(Enum): +class Actions(Enum): Neutral = 0 Long_buy = 1 Long_sell = 2 @@ -75,7 +71,7 @@ class DEnv(gym.Env): # # spaces self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions_v2)) + self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) # episode @@ -152,7 +148,7 @@ class DEnv(gym.Env): trade_type = None - if self.is_tradesignal_v2(action): # exclude 3 case not trade + if self.is_tradesignal(action): # exclude 3 case not trade # Update position """ Action: Neutral, position: Long -> Close Long @@ -167,19 +163,19 @@ class DEnv(gym.Env): temp_position = self._position - if action == Actions_v2.Neutral.value: + if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions_v2.Long_buy.value: + elif action == Actions.Long_buy.value: self._position = Positions.Long trade_type = "long" - elif action == Actions_v2.Short_buy.value: + elif action == Actions.Short_buy.value: self._position = Positions.Short trade_type = "short" - elif action == Actions_v2.Long_sell.value: + elif action == Actions.Long_sell.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions_v2.Short_sell.value: + elif action == Actions.Short_sell.value: self._position = Positions.Neutral trade_type = "neutral" else: @@ -208,11 +204,11 @@ class DEnv(gym.Env): return observation, step_reward, self._done, info - def processState(self, state): - return state.to_numpy() + # def processState(self, state): + # return state.to_numpy() - def convert_mlp_Policy(self, obs_): - pass + # def convert_mlp_Policy(self, obs_): + # pass def _get_observation(self): return self.signal_features[(self._current_tick - self.window_size):self._current_tick] @@ -245,46 +241,26 @@ class DEnv(gym.Env): Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def is_tradesignal_v2(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - 
return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or - - (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or - (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) + (action == Actions.Neutral.value and self._position == Positions.Short) or - def _is_trade_v2(self, action: Actions_v2): - return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Short) or - - (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) ) @@ -292,9 +268,6 @@ class DEnv(gym.Env): return ((action == Actions.Short.value and self._position == Positions.Short) or (action == Actions.Long.value and self._position == Positions.Long)) - def is_hold_v2(self, action): - return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) - or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) def add_buy_fee(self, price): @@ -311,156 +284,158 @@ class DEnv(gym.Env): self.history[key].append(value) - def render(self, mode='human'): + # def render(self, mode='human'): - def _plot_position(position, tick): - color = None - if position == Positions.Short: - color = 'red' - elif position == Positions.Long: - color = 'green' - if color: - plt.scatter(tick, self.prices.loc[tick].open, color=color) + # 
def _plot_position(position, tick): + # color = None + # if position == Positions.Short: + # color = 'red' + # elif position == Positions.Long: + # color = 'green' + # if color: + # plt.scatter(tick, self.prices.loc[tick].open, color=color) - if self._first_rendering: - self._first_rendering = False - plt.cla() - plt.plot(self.prices) - start_position = self._position_history[self._start_tick] - _plot_position(start_position, self._start_tick) + # if self._first_rendering: + # self._first_rendering = False + # plt.cla() + # plt.plot(self.prices) + # start_position = self._position_history[self._start_tick] + # _plot_position(start_position, self._start_tick) - plt.cla() - plt.plot(self.prices) - _plot_position(self._position, self._current_tick) + # plt.cla() + # plt.plot(self.prices) + # _plot_position(self._position, self._current_tick) - plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - plt.pause(0.01) + # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) + # plt.pause(0.01) - def render_all(self): - plt.figure() - window_ticks = np.arange(len(self._position_history)) - plt.plot(self.prices['open'], alpha=0.5) + # def render_all(self): + # plt.figure() + # window_ticks = np.arange(len(self._position_history)) + # plt.plot(self.prices['open'], alpha=0.5) - short_ticks = [] - long_ticks = [] - neutral_ticks = [] - for i, tick in enumerate(window_ticks): - if self._position_history[i] == Positions.Short: - short_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Long: - long_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Neutral: - neutral_ticks.append(tick - 1) + # short_ticks = [] + # long_ticks = [] + # neutral_ticks = [] + # for i, tick in enumerate(window_ticks): + # if self._position_history[i] == Positions.Short: + # short_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Long: + # long_ticks.append(tick - 1) + # elif self._position_history[i] == Positions.Neutral: + # neutral_ticks.append(tick - 1) - plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - 'o', color='grey', ms=3, alpha=0.1) - plt.plot(short_ticks, self.prices.loc[short_ticks].open, - 'o', color='r', ms=3, alpha=0.8) - plt.plot(long_ticks, self.prices.loc[long_ticks].open, - 'o', color='g', ms=3, alpha=0.8) + # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, + # 'o', color='grey', ms=3, alpha=0.1) + # plt.plot(short_ticks, self.prices.loc[short_ticks].open, + # 'o', color='r', ms=3, alpha=0.8) + # plt.plot(long_ticks, self.prices.loc[long_ticks].open, + # 'o', color='g', ms=3, alpha=0.8) - plt.suptitle("Generalising") - fig = plt.gcf() - fig.set_size_inches(15, 10) + # plt.suptitle("Generalising") + # fig = plt.gcf() + # fig.set_size_inches(15, 10) - def close_trade_report(self): - small_trade = 0 - positive_big_trade = 0 - negative_big_trade = 0 - small_profit = 0.003 - for i in self.close_trade_profit: - if i < small_profit and i > -small_profit: - small_trade+=1 - elif i > small_profit: - positive_big_trade += 1 - elif i < -small_profit: - negative_big_trade += 1 - print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") + # def close_trade_report(self): + # small_trade = 0 + # positive_big_trade = 0 + # negative_big_trade = 0 + # small_profit = 0.003 + # for i in 
self.close_trade_profit: + # if i < small_profit and i > -small_profit: + # small_trade+=1 + # elif i > small_profit: + # positive_big_trade += 1 + # elif i < -small_profit: + # negative_big_trade += 1 + # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - def report(self): + # def report(self): - # get total trade - long_trade = 0 - short_trade = 0 - neutral_trade = 0 - for trade in self.trade_history: - if trade['type'] == 'long': - long_trade += 1 + # # get total trade + # long_trade = 0 + # short_trade = 0 + # neutral_trade = 0 + # for trade in self.trade_history: + # if trade['type'] == 'long': + # long_trade += 1 - elif trade['type'] == 'short': - short_trade += 1 - else: - neutral_trade += 1 + # elif trade['type'] == 'short': + # short_trade += 1 + # else: + # neutral_trade += 1 - negative_trade = 0 - positive_trade = 0 - for tr in self.close_trade_profit: - if tr < 0.: - negative_trade += 1 + # negative_trade = 0 + # positive_trade = 0 + # for tr in self.close_trade_profit: + # if tr < 0.: + # negative_trade += 1 - if tr > 0.: - positive_trade += 1 + # if tr > 0.: + # positive_trade += 1 - total_trade_lr = negative_trade+positive_trade + # total_trade_lr = negative_trade+positive_trade - total_trade = long_trade + short_trade - sharp_ratio = self.sharpe_ratio() - sharp_log = self.get_sharpe_ratio() + # total_trade = long_trade + short_trade + # sharp_ratio = self.sharpe_ratio() + # sharp_log = self.get_sharpe_ratio() - from tabulate import tabulate + # from tabulate import tabulate - headers = ["Performance", ""] - performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - ["Total reward", "{0:.3f}".format(self.total_reward)], - ["Start profit(unit)", "{0:.2f}".format(1.)], - ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - ["Sharp log", "{0:.3f}".format(sharp_log)], - # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - ] - tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - print(tabulation) + # headers = ["Performance", ""] + # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], + # ["Total reward", "{0:.3f}".format(self.total_reward)], + # ["Start profit(unit)", "{0:.2f}".format(1.)], + # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], + # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], + # ["Sharp log", "{0:.3f}".format(sharp_log)], + # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], + # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] + # ] + # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") + # print(tabulation) - result = { - "Start": "{0:.2f}".format(1.), - "End": "{0:.2f}".format(self._total_profit), - "Sharp": "{0:.3f}".format(sharp_ratio), - "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) - } - return result + # result = { + # "Start": "{0:.2f}".format(1.), + # "End": "{0:.2f}".format(self._total_profit), + # "Sharp": "{0:.3f}".format(sharp_ratio), + # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) + # } + # return result - def close(self): - plt.close() + # def close(self): + # plt.close() def get_sharpe_ratio(self): return mean_over_std(self.get_portfolio_log_returns()) - def save_rendering(self, filepath): - 
plt.savefig(filepath) + # def save_rendering(self, filepath): + # plt.savefig(filepath) - def pause_rendering(self): - plt.show() + # def pause_rendering(self): + # plt.show() def _calculate_reward(self, action): # rw = self.transaction_profit_reward(action) #rw = self.reward_rr_profit_config(action) - rw = self.reward_rr_profit_config_v2(action) + #rw = self.reward_rr_profit_config(action) # main + #rw = self.profit_only_when_close_reward(action) + rw = self.profit_only_when_close_reward_aim(action) return rw def _update_profit(self, action): #if self._is_trade(action) or self._done: - if self._is_trade_v2(action) or self._done: + if self._is_trade(action) or self._done: pnl = self.get_unrealized_profit() if self._position == Positions.Long: @@ -485,7 +460,7 @@ class DEnv(gym.Env): if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open #if action == Actions.Short.value or action == Actions.Neutral.value: - if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: + if action == Actions.Short_buy.value or action == Actions.Neutral.value: current_price = self.add_sell_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open @@ -500,7 +475,7 @@ class DEnv(gym.Env): if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open #if action == Actions.Long.value or action == Actions.Neutral.value: - if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: + if action == Actions.Long_buy.value or action == Actions.Neutral.value: current_price = self.add_buy_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open @@ -574,8 +549,57 @@ class DEnv(gym.Env): return np.clip(rw, 0, 1) + def profit_only_when_close_reward(self, action): - def reward_rr_profit_config_v2(self, action): + if self._last_trade_tick == None: + return 0. + + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. + + def profit_only_when_close_reward_aim(self, action): + + if self._last_trade_tick == None: + return 0. 
+ + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. + + def reward_rr_profit_config(self, action): rw = 0. pt_1 = self.current_price() @@ -587,61 +611,61 @@ class DEnv(gym.Env): pt_1 = self.add_sell_fee(self.current_price()) po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - if action == Actions_v2.Short_buy.value: + if action == Actions.Short_buy.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 1 + rw = 15 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -10 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw = -15 - if action == Actions_v2.Long_sell.value: + if action == Actions.Long_sell.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 3 + rw = 20 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -15 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw = -25 - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 + if action == Actions.Neutral.value: + if self.close_trade_profit[-1] > 0.005: + rw = 0 elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 + rw = 0 # short if self._position == Positions.Short: pt_1 = self.add_sell_fee(self.current_price()) po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - if action == Actions_v2.Long_buy.value: + if action == Actions.Long_buy.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 1 + rw = 15 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < 
(self.profit_aim * -1) * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -10 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw =- -25 - if action == Actions_v2.Short_sell.value: + if action == Actions.Short_sell.value: if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 3 + rw = 20 + elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: + rw = -1 elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 + rw = -15 elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 + rw = -25 - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 + if action == Actions.Neutral.value: + if self.close_trade_profit[-1] > 0.005: + rw = 0 elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 + rw = 0 return np.clip(rw, 0, 1) diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py deleted file mode 100644 index ac91cd200..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py +++ /dev/null @@ -1,645 +0,0 @@ -import gym -from gym import spaces -from gym.utils import seeding -from enum import Enum -from sklearn.decomposition import PCA, KernelPCA -import random -import numpy as np -import pandas as pd -from collections import deque -import matplotlib.pylab as plt -from typing import Dict, List, Tuple, Type, Optional, Any, Union, Callable -import logging - -logger = logging.getLogger(__name__) - -# from bokeh.io import output_notebook -# from bokeh.plotting import figure, show -# from bokeh.models import ( -# CustomJS, -# ColumnDataSource, -# NumeralTickFormatter, -# Span, -# HoverTool, -# Range1d, -# DatetimeTickFormatter, -# Scatter, -# Label, LabelSet -# ) - -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 - -class Actions_v2(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - -class DEnv(gym.Env): - - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee=0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions_v2)) - self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. 
- - self.returns_report = [] - - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - - def reset(self): - - self._done = False - - if self.starting_point == True: - self._position_history = (self._start_tick* [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - #self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. - - self.returns_report = [] - - return self._get_observation() - - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - - - - - trade_type = None - if self.is_tradesignal_v2(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - - temp_position = self._position - if action == Actions_v2.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions_v2.Long_buy.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions_v2.Short_buy.value: - self._position = Positions.Short - trade_type = "short" - elif action == Actions_v2.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions_v2.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type != None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick = self._current_tick, - total_reward = self.total_reward, - total_profit = self._total_profit, - position = self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - - def processState(self, state): - return state.to_numpy() - - def convert_mlp_Policy(self, obs_): - pass - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - - def get_unrealized_profit(self): - - if self._last_trade_tick == None: - return 0. - - if self._position == Positions.Neutral: - return 0. 
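For quick reference, the five-action scheme used by this environment maps each action onto the position it moves into, exactly as enumerated in `step()` above: the `_buy` actions open a position, while Neutral and both `_sell` actions flatten it. A compact, purely illustrative restatement of that transition table:

```python
from enum import Enum

class Actions5(Enum):        # mirrors Actions_v2 in this environment
    Neutral = 0
    Long_buy = 1
    Long_sell = 2
    Short_buy = 3
    Short_sell = 4

class Position(Enum):        # mirrors Positions
    Short = 0
    Long = 1
    Neutral = 0.5

# action -> position the environment transitions into inside step()
TRANSITIONS = {
    Actions5.Neutral: Position.Neutral,
    Actions5.Long_buy: Position.Long,
    Actions5.Long_sell: Position.Neutral,
    Actions5.Short_buy: Position.Short,
    Actions5.Short_sell: Position.Neutral,
}

print(TRANSITIONS[Actions5.Long_sell])   # Position.Neutral: selling closes the long
```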
- elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price)/last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price)/last_trade_price - else: - return 0. - - - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def is_tradesignal_v2(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions_v2.Neutral.value and self._position == Positions.Neutral) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Short_sell.value and self._position == Positions.Long) or - - (action == Actions_v2.Long_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Long) or - (action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Long_sell.value and self._position == Positions.Short)) - - - - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) - - def _is_trade_v2(self, action: Actions_v2): - return ((action == Actions_v2.Long_buy.value and self._position == Positions.Short) or - (action == Actions_v2.Short_buy.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Long) or - (action == Actions_v2.Neutral.value and self._position == Positions.Short) or - - (action == Actions_v2.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions_v2.Neutral.Long_sell and self._position == Positions.Short) - ) - - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def is_hold_v2(self, action): - return ((action == Actions_v2.Short_buy.value and self._position == Positions.Short) - or (action == Actions_v2.Long_buy.value and self._position == Positions.Long)) - - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, 
value in info.items(): - self.history[key].append(value) - - - def render(self, mode='human'): - - def _plot_position(position, tick): - color = None - if position == Positions.Short: - color = 'red' - elif position == Positions.Long: - color = 'green' - if color: - plt.scatter(tick, self.prices.loc[tick].open, color=color) - - if self._first_rendering: - self._first_rendering = False - plt.cla() - plt.plot(self.prices) - start_position = self._position_history[self._start_tick] - _plot_position(start_position, self._start_tick) - - plt.cla() - plt.plot(self.prices) - _plot_position(self._position, self._current_tick) - - plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - plt.pause(0.01) - - - def render_all(self): - plt.figure() - window_ticks = np.arange(len(self._position_history)) - plt.plot(self.prices['open'], alpha=0.5) - - short_ticks = [] - long_ticks = [] - neutral_ticks = [] - for i, tick in enumerate(window_ticks): - if self._position_history[i] == Positions.Short: - short_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Long: - long_ticks.append(tick - 1) - elif self._position_history[i] == Positions.Neutral: - neutral_ticks.append(tick - 1) - - plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - 'o', color='grey', ms=3, alpha=0.1) - plt.plot(short_ticks, self.prices.loc[short_ticks].open, - 'o', color='r', ms=3, alpha=0.8) - plt.plot(long_ticks, self.prices.loc[long_ticks].open, - 'o', color='g', ms=3, alpha=0.8) - - plt.suptitle("Generalising") - fig = plt.gcf() - fig.set_size_inches(15, 10) - - - - - def close_trade_report(self): - small_trade = 0 - positive_big_trade = 0 - negative_big_trade = 0 - small_profit = 0.003 - for i in self.close_trade_profit: - if i < small_profit and i > -small_profit: - small_trade+=1 - elif i > small_profit: - positive_big_trade += 1 - elif i < -small_profit: - negative_big_trade += 1 - print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - - - def report(self): - - # get total trade - long_trade = 0 - short_trade = 0 - neutral_trade = 0 - for trade in self.trade_history: - if trade['type'] == 'long': - long_trade += 1 - - elif trade['type'] == 'short': - short_trade += 1 - else: - neutral_trade += 1 - - negative_trade = 0 - positive_trade = 0 - for tr in self.close_trade_profit: - if tr < 0.: - negative_trade += 1 - - if tr > 0.: - positive_trade += 1 - - total_trade_lr = negative_trade+positive_trade - - - total_trade = long_trade + short_trade - sharp_ratio = self.sharpe_ratio() - sharp_log = self.get_sharpe_ratio() - - from tabulate import tabulate - - headers = ["Performance", ""] - performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - ["Total reward", "{0:.3f}".format(self.total_reward)], - ["Start profit(unit)", "{0:.2f}".format(1.)], - ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - ["Sharp log", "{0:.3f}".format(sharp_log)], - # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - ] - tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - print(tabulation) - - result = { - "Start": "{0:.2f}".format(1.), - "End": "{0:.2f}".format(self._total_profit), - "Sharp": "{0:.3f}".format(sharp_ratio), - "Winrate": 
"{0:.2f}".format(positive_trade*100/total_trade_lr) - } - return result - - def close(self): - plt.close() - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - - def save_rendering(self, filepath): - plt.savefig(filepath) - - - def pause_rendering(self): - plt.show() - - - def _calculate_reward(self, action): - # rw = self.transaction_profit_reward(action) - #rw = self.reward_rr_profit_config(action) - rw = self.reward_rr_profit_config_v2(action) - return rw - - - def _update_profit(self, action): - #if self._is_trade(action) or self._done: - if self._is_trade_v2(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - - def most_recent_return(self, action): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Short.value or action == Actions.Neutral.value: - if action == Actions_v2.Short_buy.value or action == Actions_v2.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Long.value or action == Actions.Neutral.value: - if action == Actions_v2.Long_buy.value or action == Actions_v2.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - - def get_trading_log_return(self): - return self.portfolio_log_returns[self._start_tick:] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick-1].open - - - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. 
+ 1e-9) / (np.std(returns) + 1e-9) - return reward - - def get_bnh_log_return(self): - return np.diff(np.log(self.prices['open'][self._start_tick:])) - - - def transaction_profit_reward(self, action): - rw = 0. - - pt = self.prev_price() - pt_1 = self.current_price() - - - if self._position == Positions.Long: - a_t = 1 - elif self._position == Positions.Short: - a_t = -1 - else: - a_t = 0 - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - # close short - elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - pt_1 = self.add_buy_fee(self.current_price()) - po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - else: - rw = a_t*(pt_1 - pt)/pt - - return np.clip(rw, 0, 1) - - - - def reward_rr_profit_config_v2(self, action): - rw = 0. - - pt_1 = self.current_price() - - - if len(self.close_trade_profit) > 0: - # long - if self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions_v2.Short_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 1 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Long_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = 10 * 1 * 3 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 - elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 - - # short - if self._position == Positions.Short: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions_v2.Long_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 2 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 1 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Short_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 10 * 5 - elif self.close_trade_profit[-1] > 0 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 1 * 3 - elif self.close_trade_profit[-1] < 0: - rw = 10 * -1 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = 10 * 3 * -1 - - if action == Actions_v2.Neutral.value: - if self.close_trade_profit[-1] > 0: - rw = 2 - elif self.close_trade_profit[-1] < 0: - rw = 2 * -1 - - return np.clip(rw, 0, 1) \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index 
5783baba8..60e29d3ab 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -4,29 +4,23 @@ from typing import Any, Dict, Tuple import numpy as np import numpy.typing as npt import pandas as pd +import torch as th from pandas import DataFrame +from stable_baselines3 import PPO +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent import RLPrediction_agent -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN -#from freqtrade.freqai.prediction_models.RL.RLPrediction_env import GymAnytrading -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv +from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_TDQN import TDQN +from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_5ac import DEnv +#from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_3ac import DEnv from freqtrade.persistence import Trade -from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.monitor import Monitor - -import torch as th -from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, StopTrainingOnRewardThreshold -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3 import PPO - - - logger = logging.getLogger(__name__) - class ReinforcementLearningModel(IFreqaiModel): """ User created Reinforcement Learning Model prediction model. 
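The reworked imports above pull in the stable-baselines3 building blocks this model relies on: `PPO`, `EvalCallback`, `Monitor`, a `ReplayBuffer` for the TDQN variant, and `torch` for the policy network definition. A rough sketch of how those pieces usually fit together, assuming `train_env`/`eval_env` are already-constructed gym environments and `path` is a writable directory; this is not the patch's `fit()` body:

```python
import torch as th
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.monitor import Monitor

def train_ppo(train_env, eval_env, total_timesteps: int, eval_freq: int, path: str):
    """Illustrative wiring of the imported pieces, not the implementation in this diff."""
    train_env = Monitor(train_env)          # records episode statistics for logging
    eval_env = Monitor(eval_env)
    eval_callback = EvalCallback(eval_env,
                                 best_model_save_path=f"{path}/",
                                 eval_freq=eval_freq,
                                 deterministic=True, render=False)
    policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128])
    model = PPO("MlpPolicy", train_env, policy_kwargs=policy_kwargs,
                learning_rate=0.00025, gamma=0.9, verbose=0)
    model.learn(total_timesteps=total_timesteps, callback=eval_callback)
    return model
```

Later in this diff, `eval_freq` and `total_timesteps` are derived as `eval_cycles * len(test_df)` and `train_cycles * len(train_df)`; the sketch simply takes them as parameters.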
@@ -87,30 +81,22 @@ class ReinforcementLearningModel(IFreqaiModel): # # train_labels = data_dictionary["train_labels"] # test_df = data_dictionary["test_features"] # # test_labels = data_dictionary["test_labels"] - # # sep = '/' # # coin = pair.split(sep, 1)[0] # # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] # # price.reset_index(inplace=True, drop=True) # # price = price.to_frame() # price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - + # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) # #train_env = GymAnytrading(train_df, price, self.CONV_WIDTH) - # agent_params = self.freqai_info['model_training_parameters'] # reward_params = self.freqai_info['model_reward_parameters'] - - # train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) # #eval_env = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - # #env_instance = SubprocVecEnv([DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params)]) # #train_env.reset() # #eval_env.reset() - # # model - # #policy_kwargs = dict(net_arch=[512, 512, 512]) # policy_kwargs = dict(activation_fn=th.nn.Tanh, # net_arch=[256, 256, 256]) @@ -124,27 +110,22 @@ class ReinforcementLearningModel(IFreqaiModel): # tb_log_name=model_name, # model_kwargs=agent_params, # train_df=train_df, - # test_df=test_df, - # price=price, - # price_test=price_test, + # test_df=test_df, + # price=price, + # price_test=price_test, # window_size=self.CONV_WIDTH) - - # # best_model = eval_agent.train_model(model=model, # # tb_log_name=model_name, # # model_kwargs=agent_params, # # eval=eval_env) - - # # TDQN # # model_name = 'TDQN' # # model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log='./tensorboard_log/', # # learning_rate=agent_params["learning_rate"], gamma=0.9, - # # target_update_interval=5000, buffer_size=50000, + # # target_update_interval=5000, buffer_size=50000, # # exploration_initial_eps=1, exploration_final_eps=0.1, # # replay_buffer_class=ReplayBuffer # # ) - # # trained_model = agent.train_model(model=model, # # tb_log_name=model_name, # # model_kwargs=agent_params) @@ -157,11 +138,13 @@ class ReinforcementLearningModel(IFreqaiModel): reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) # price data for model training and evaluation price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) + # environments train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) eval = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) @@ -173,19 +156,17 @@ class ReinforcementLearningModel(IFreqaiModel): path = self.dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/{agent_type}/logs/", eval_freq=10000, + 
log_path=f"{path}/{agent_type}/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) - # model arch - policy_kwargs = dict(activation_fn=th.nn.Tanh, - net_arch=[512, 512, 512]) - + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) if agent_type == 'tdqn': model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", learning_rate=0.00025, gamma=0.9, - target_update_interval=5000, buffer_size=50000, + target_update_interval=5000, buffer_size=50000, exploration_initial_eps=1, exploration_final_eps=0.1, replay_buffer_class=ReplayBuffer ) @@ -193,9 +174,9 @@ class ReinforcementLearningModel(IFreqaiModel): model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", learning_rate=0.00025, gamma=0.9 ) - + model.learn( - total_timesteps=agent_params["total_timesteps"], + total_timesteps=int(total_timesteps), callback=eval_callback ) From 70b25461f06b6a555d2cc21c3910834514df996a Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Sun, 14 Aug 2022 20:47:58 +0700 Subject: [PATCH 009/232] add rl dependency --- requirements-freqai.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 26e4617af..8d8bb03c5 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -6,3 +6,6 @@ scikit-learn==1.1.2 joblib==1.1.0 catboost==1.0.6; platform_machine != 'aarch64' lightgbm==3.3.2 +torch==1.12.1 +stable-baselines3==1.5.0 +gym==0.21.0 \ No newline at end of file From ecd1f55abc45c677cb688040c01ddfc255dab40d Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Sun, 14 Aug 2022 21:26:34 +0700 Subject: [PATCH 010/232] add rl module --- freqtrade/freqai/prediction_models/ReinforcementLearning.py | 2 +- freqtrade/freqai/rl/BaseRLAgent.py | 0 freqtrade/freqai/rl/__init__.py | 0 3 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 freqtrade/freqai/rl/BaseRLAgent.py create mode 100644 freqtrade/freqai/rl/__init__.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py index 60e29d3ab..6ced4749e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearning.py @@ -21,7 +21,7 @@ from freqtrade.persistence import Trade logger = logging.getLogger(__name__) -class ReinforcementLearningModel(IFreqaiModel): +class ReinforcementLearning(IFreqaiModel): """ User created Reinforcement Learning Model prediction model. 
""" diff --git a/freqtrade/freqai/rl/BaseRLAgent.py b/freqtrade/freqai/rl/BaseRLAgent.py new file mode 100644 index 000000000..e69de29bb diff --git a/freqtrade/freqai/rl/__init__.py b/freqtrade/freqai/rl/__init__.py new file mode 100644 index 000000000..e69de29bb From 91683e1dcaa0b966b813d5e418834136828b0cad Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 10:26:44 +0200 Subject: [PATCH 011/232] restructure RL so that user can customize environment --- freqtrade/freqai/RL/BaseRLEnv.py | 318 +++++++++ .../RL/BaseReinforcementLearningModel.py | 230 ++++++ .../TDQNagent.py} | 29 +- freqtrade/freqai/{rl => RL}/__init__.py | 0 .../RL/RLPrediction_agent.py | 139 ---- .../RL/RLPrediction_env_TDQN_3ac.py | 513 ------------- .../RL/RLPrediction_env_TDQN_5ac.py | 671 ------------------ .../freqai/prediction_models/RL/config.py | 37 - .../prediction_models/RLPredictionModel.py | 253 ------- .../ReinforcementLearning.py | 273 ------- .../ReinforcementLearningPPO.py | 155 ++++ .../ReinforcementLearningTDQN.py | 168 +++++ freqtrade/freqai/rl/BaseRLAgent.py | 0 13 files changed, 882 insertions(+), 1904 deletions(-) create mode 100644 freqtrade/freqai/RL/BaseRLEnv.py create mode 100644 freqtrade/freqai/RL/BaseReinforcementLearningModel.py rename freqtrade/freqai/{prediction_models/RL/RLPrediction_agent_TDQN.py => RL/TDQNagent.py} (93%) rename freqtrade/freqai/{rl => RL}/__init__.py (100%) delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py delete mode 100644 freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py delete mode 100644 freqtrade/freqai/prediction_models/RL/config.py delete mode 100644 freqtrade/freqai/prediction_models/RLPredictionModel.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearning.py create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py delete mode 100644 freqtrade/freqai/rl/BaseRLAgent.py diff --git a/freqtrade/freqai/RL/BaseRLEnv.py b/freqtrade/freqai/RL/BaseRLEnv.py new file mode 100644 index 000000000..607262acd --- /dev/null +++ b/freqtrade/freqai/RL/BaseRLEnv.py @@ -0,0 +1,318 @@ +import logging +from enum import Enum +# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + +import gym +import numpy as np +from gym import spaces +from gym.utils import seeding + +logger = logging.getLogger(__name__) + + +class Actions(Enum): + Short = 0 + Long = 1 + Neutral = 2 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + + +class BaseRLEnv(gym.Env): + + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, 
dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed: int = 1): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + + def step(self, action: int): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self.calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. 
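`BaseRLEnv` follows the single-environment gym contract: `reset()` returns the first observation window and `step(action)` returns `(observation, reward, done, info)`, with `info` carrying the tick, cumulative reward, total profit and current position. A minimal smoke-test loop on synthetic data, assuming the import path created by this patch and the default `calculate_reward` behaviour; illustrative only:

```python
import numpy as np
import pandas as pd

# import path introduced by this patch; purely for illustration
from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv

np.random.seed(0)
n = 500
prices = pd.DataFrame({"open": 100 + np.cumsum(np.random.randn(n))})
features = pd.DataFrame(np.random.randn(n, 4), columns=[f"f{i}" for i in range(4)])

env = BaseRLEnv(df=features, prices=prices, window_size=10,
                reward_kwargs={"rr": 1, "profit_aim": 0.02})
obs = env.reset()                       # first (window_size, n_features) observation
done = False
while not done:
    action = env.action_space.sample()  # random Short / Long / Neutral
    obs, reward, done, info = env.step(action)
print(info["total_profit"], info["total_reward"])
```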
+ elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. + + def is_tradesignal(self, action: int): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. + + def _update_profit(self, action): + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action: int): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. 
+ The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. + """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Short.value or action == Actions.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + if action == Actions.Long.value or action == Actions.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py new file mode 100644 index 000000000..accddc94d --- /dev/null +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -0,0 +1,230 @@ +import logging +from typing import Any, Dict, Tuple + +import numpy as np +import numpy.typing as npt +import pandas as pd +from pandas import DataFrame +from abc import abstractmethod +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.freqai_interface import IFreqaiModel +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.persistence import Trade + +logger = logging.getLogger(__name__) + + +class BaseReinforcementLearningModel(IFreqaiModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def train( + self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_dataframe: Full dataframe for the current training period + :param metadata: pair metadata from strategy. 
+ :returns: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info("--------------------Starting training " f"{pair} --------------------") + + # filter the features requested by user in the configuration file and elegantly handle NaNs + features_filtered, labels_filtered = dk.filter_features( + unfiltered_dataframe, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + features_filtered, labels_filtered) + dk.fit_labels() # useless for now, but just satiating append methods + + # normalize all data based on train_dataset only + data_dictionary = dk.normalize_data(data_dictionary) + + # optional additional data cleaning/analysis + self.data_cleaning_train(dk) + + logger.info( + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + ) + logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') + + model = self.fit(data_dictionary, pair) + + if pair not in self.dd.historic_predictions: + self.set_initial_historic_predictions( + data_dictionary['train_features'], model, dk, pair) + + self.dd.save_historic_predictions_to_disk() + + logger.info(f"--------------------done training {pair}--------------------") + + return model + + @abstractmethod + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + """ + Agent customizations and abstract Reinforcement Learning customizations + go in here. Abstract method, so this function must be overridden by + user class. + """ + + return + + def get_state_info(self, pair): + open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) + market_side = 0.5 + current_profit = 0 + for trade in open_trades: + if trade.pair == pair: + current_value = trade.open_trade_value + openrate = trade.open_rate + if 'long' in trade.enter_tag: + market_side = 1 + else: + market_side = 0 + current_profit = current_value / openrate - 1 + + total_profit = 0 + closed_trades = Trade.get_trades( + trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) + for trade in closed_trades: + total_profit += trade.close_profit + + return market_side, current_profit, total_profit + + def predict( + self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + """ + Filter the prediction features data and predict with it. + :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
+ :return: + :pred_df: dataframe containing the predictions + :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove + data (NaNs) or felt uncertain about data (PCA and DI index) + """ + + dk.find_features(unfiltered_dataframe) + filtered_dataframe, _ = dk.filter_features( + unfiltered_dataframe, dk.training_features_list, training_filter=False + ) + filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) + dk.data_dictionary["prediction_features"] = filtered_dataframe + + # optional additional data cleaning/analysis + self.data_cleaning_predict(dk, filtered_dataframe) + + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df.fillna(0, inplace=True) + + return (pred_df, dk.do_predict) + + def rl_model_predict(self, dataframe: DataFrame, + dk: FreqaiDataKitchen, model: Any) -> DataFrame: + + output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + + def _predict(window): + observations = dataframe.iloc[window.index] + res, _ = model.predict(observations, deterministic=True) + return res + + output = output.rolling(window=self.CONV_WIDTH).apply(_predict) + + return output + + def set_initial_historic_predictions( + self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str + ) -> None: + + pred_df = self.rl_model_predict(df, dk, model) + pred_df.fillna(0, inplace=True) + self.dd.historic_predictions[pair] = pred_df + hist_preds_df = self.dd.historic_predictions[pair] + + for label in hist_preds_df.columns: + if hist_preds_df[label].dtype == object: + continue + hist_preds_df[f'{label}_mean'] = 0 + hist_preds_df[f'{label}_std'] = 0 + + hist_preds_df['do_predict'] = 0 + + if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: + hist_preds_df['DI_values'] = 0 + + for return_str in dk.data['extra_returns_per_train']: + hist_preds_df[return_str] = 0 + + +class MyRLEnv(BaseRLEnv): + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return 
observation, step_reward, self._done, info diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py b/freqtrade/freqai/RL/TDQNagent.py similarity index 93% rename from freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py rename to freqtrade/freqai/RL/TDQNagent.py index 0aa3512a1..584f6a8ef 100644 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent_TDQN.py +++ b/freqtrade/freqai/RL/TDQNagent.py @@ -6,11 +6,10 @@ import torch as th from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.policies import BasePolicy -from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, CombinedExtractor, +from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, FlattenExtractor) from stable_baselines3.common.type_aliases import GymEnv, Schedule -#from stable_baselines3.common.policies import register_policy -from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy, +from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, QNetwork) from torch import nn @@ -47,16 +46,17 @@ def create_mlp_( ] return modules + class TDQNetwork(QNetwork): def __init__(self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True - ): + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + features_extractor: nn.Module, + features_dim: int, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + normalize_images: bool = True + ): super().__init__( observation_space=observation_space, action_space=action_space, @@ -211,10 +211,3 @@ class TDQN(DQN): device=device, _init_setup_model=_init_setup_model ) - - - -# try: -# register_policy("TMultiInputPolicy", TMultiInputPolicy) -# except: -# print("already registered") diff --git a/freqtrade/freqai/rl/__init__.py b/freqtrade/freqai/RL/__init__.py similarity index 100% rename from freqtrade/freqai/rl/__init__.py rename to freqtrade/freqai/RL/__init__.py diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py deleted file mode 100644 index 26b31f6e9..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_agent.py +++ /dev/null @@ -1,139 +0,0 @@ -# common library - -import gym -import numpy as np -from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3 -from stable_baselines3.common.callbacks import (BaseCallback, CallbackList, CheckpointCallback, - EvalCallback, StopTrainingOnRewardThreshold) -from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise - -from freqtrade.freqai.prediction_models.RL import config -#from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_v2 import TDQN -from freqtrade.freqai.prediction_models.RL.RLPrediction_env import DEnv - - -# from stable_baselines3.common.vec_env import DummyVecEnv - -# from meta.env_stock_trading.env_stock_trading import StockTradingEnv - -# RL models from stable-baselines - - -MODELS = {"a2c": A2C, "ddpg": DDPG, "td3": TD3, "sac": SAC, "ppo": PPO} - - -MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()} - - -NOISE = { - "normal": NormalActionNoise, - "ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise, -} - - -class TensorboardCallback(BaseCallback): - 
""" - Custom callback for plotting additional values in tensorboard. - """ - - def __init__(self, verbose=0): - super(TensorboardCallback, self).__init__(verbose) - - def _on_step(self) -> bool: - try: - self.logger.record(key="train/reward", value=self.locals["rewards"][0]) - except BaseException: - self.logger.record(key="train/reward", value=self.locals["reward"][0]) - return True - - -class RLPrediction_agent: - """Provides implementations for DRL algorithms - Based on: - https://github.com/AI4Finance-Foundation/FinRL-Meta/blob/master/agents/stablebaselines3_models.py - Attributes - ---------- - env: gym environment class - user-defined class - - Methods - ------- - get_model() - setup DRL algorithms - train_model() - train DRL algorithms in a train dataset - and output the trained model - DRL_prediction() - make a prediction in a test dataset and get results - """ - - def __init__(self, env): - self.env = env - - def get_model( - self, - model_name, - policy="MlpPolicy", - policy_kwargs=None, - model_kwargs=None, - reward_kwargs=None, - #total_timesteps=None, - verbose=1, - seed=None - ): - if model_name not in MODELS: - raise NotImplementedError("NotImplementedError") - - if model_kwargs is None: - model_kwargs = MODEL_KWARGS[model_name] - - if "action_noise" in model_kwargs: - n_actions = self.env.action_space.shape[-1] - model_kwargs["action_noise"] = NOISE[model_kwargs["action_noise"]]( - mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions) - ) - print(model_kwargs) - model = MODELS[model_name]( - policy=policy, - env=self.env, - tensorboard_log=f"{config.TENSORBOARD_LOG_DIR}/{model_name}", - verbose=verbose, - policy_kwargs=policy_kwargs, - #model_kwargs=model_kwargs, - #total_timesteps=model_kwargs["total_timesteps"], - seed=seed - #**model_kwargs, - ) - - - - - return model - - def train_model(self, model, tb_log_name, model_kwargs, train_df, test_df, price, price_test, window_size): - - - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] - train_env = DEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params) - eval_env = DEnv(df=test_df, prices=price_test, window_size=window_size, reward_kwargs=reward_params) - - # checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/', - # name_prefix='rl_model') - - checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./logs/') - - eval_callback = EvalCallback(eval_env, best_model_save_path='./logs/best_model', log_path='./logs/results', eval_freq=500) - #callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=1) - - # Create the callback list - callback = CallbackList([checkpoint_callback, eval_callback]) - - - model = model.learn( - total_timesteps=model_kwargs["total_timesteps"], - tb_log_name=tb_log_name, - callback=callback, - #callback=TensorboardCallback(), - ) - return model diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py deleted file mode 100644 index 184ec57ec..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_3ac.py +++ /dev/null @@ -1,513 +0,0 @@ -import logging -import random -from collections import deque -from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union - -import gym -import matplotlib.pylab as plt -import numpy as np -import pandas as pd -from gym import spaces -from gym.utils import 
seeding - -logger = logging.getLogger(__name__) - -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - -class DEnv(gym.Env): - - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee=0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point == True: - self._position_history = (self._start_tick* [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - #self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. 
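`mean_over_std` above is the helper behind the environment's `get_sharpe_ratio()` (applied to per-candle portfolio log returns), while the separate `sharpe_ratio()` method works on the list of closed-trade profits with small epsilon guards. A tiny worked example on made-up numbers that reproduces the same arithmetic outside the class:

```python
import numpy as np

def mean_over_std(x):
    std = np.std(x, ddof=1)
    mean = np.mean(x)
    return mean / std if std > 0 else 0

# per-trade profits and per-candle log returns, values purely illustrative
close_trade_profit = np.array([0.012, -0.004, 0.020, -0.007, 0.003])
log_returns = np.log1p(close_trade_profit)

sharpe_trades = (np.mean(close_trade_profit) + 1e-9) / (np.std(close_trade_profit) + 1e-9)
sharpe_log = mean_over_std(log_returns)
print(round(sharpe_trades, 3), round(sharpe_log, 3))
```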
- - self.returns_report = [] - - return self._get_observation() - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - temp_position = self._position - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type != None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick = self._current_tick, - total_reward = self.total_reward, - total_profit = self._total_profit, - position = self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - # def processState(self, state): - # return state.to_numpy() - - # def convert_mlp_Policy(self, obs_): - # pass - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - def get_unrealized_profit(self): - - if self._last_trade_tick == None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price)/last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price)/last_trade_price - else: - return 0. 
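Both environments account for exchange fees the same way: entries and exits are marked against the candle `open`, `add_buy_fee()` multiplies by `(1 + fee)`, `add_sell_fee()` divides by it, and `get_unrealized_profit()` takes the fractional move between those fee-adjusted prices. A small self-contained sketch of that arithmetic; the helper names mirror the methods above and the example trade is made up:

```python
FEE = 0.0015  # same flat fee used by the environments above

def add_buy_fee(price: float) -> float:
    return price * (1 + FEE)

def add_sell_fee(price: float) -> float:
    return price / (1 + FEE)

def unrealized_profit(position: str, entry_open: float, current_open: float) -> float:
    """Fee-adjusted fractional PnL, mirroring get_unrealized_profit()."""
    if position == "long":
        entry = add_buy_fee(entry_open)      # fee paid when buying in
        now = add_sell_fee(current_open)     # fee paid again to sell out
        return (now - entry) / entry
    if position == "short":
        entry = add_sell_fee(entry_open)
        now = add_buy_fee(current_open)
        return (entry - now) / entry
    return 0.0

# a long opened at 100 and marked at 101 nets roughly 0.7% after both fees
print(unrealized_profit("long", 100.0, 101.0))
```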
- - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - - # def render(self, mode='human'): - # def _plot_position(position, tick): - # color = None - # if position == Positions.Short: - # color = 'red' - # elif position == Positions.Long: - # color = 'green' - # if color: - # plt.scatter(tick, self.prices.loc[tick].open, color=color) - # if self._first_rendering: - # self._first_rendering = False - # plt.cla() - # plt.plot(self.prices) - # start_position = self._position_history[self._start_tick] - # _plot_position(start_position, self._start_tick) - # plt.cla() - # plt.plot(self.prices) - # _plot_position(self._position, self._current_tick) - # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - # plt.pause(0.01) - - # def render_all(self): - # plt.figure() - # window_ticks = np.arange(len(self._position_history)) - # plt.plot(self.prices['open'], alpha=0.5) - # short_ticks = [] - # long_ticks = [] - # neutral_ticks = [] - # for i, tick in enumerate(window_ticks): - # if self._position_history[i] == Positions.Short: - # short_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Long: - # long_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Neutral: - # neutral_ticks.append(tick - 1) - # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - # 'o', color='grey', ms=3, alpha=0.1) - # plt.plot(short_ticks, self.prices.loc[short_ticks].open, - # 'o', color='r', ms=3, alpha=0.8) - # plt.plot(long_ticks, self.prices.loc[long_ticks].open, - # 'o', color='g', ms=3, alpha=0.8) - # plt.suptitle("Generalising") - # fig = plt.gcf() - # fig.set_size_inches(15, 10) - - # def close_trade_report(self): - # small_trade = 0 - # positive_big_trade = 0 - # negative_big_trade = 0 - # small_profit = 0.003 - # for i in self.close_trade_profit: - # if i < small_profit and i > -small_profit: - # small_trade+=1 - # elif i > small_profit: - # positive_big_trade += 1 - # elif i < -small_profit: - # negative_big_trade += 1 - # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - - # def report(self): - # # get total 
trade - # long_trade = 0 - # short_trade = 0 - # neutral_trade = 0 - # for trade in self.trade_history: - # if trade['type'] == 'long': - # long_trade += 1 - # elif trade['type'] == 'short': - # short_trade += 1 - # else: - # neutral_trade += 1 - # negative_trade = 0 - # positive_trade = 0 - # for tr in self.close_trade_profit: - # if tr < 0.: - # negative_trade += 1 - # if tr > 0.: - # positive_trade += 1 - # total_trade_lr = negative_trade+positive_trade - # total_trade = long_trade + short_trade - # sharp_ratio = self.sharpe_ratio() - # sharp_log = self.get_sharpe_ratio() - # from tabulate import tabulate - # headers = ["Performance", ""] - # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - # ["Total reward", "{0:.3f}".format(self.total_reward)], - # ["Start profit(unit)", "{0:.2f}".format(1.)], - # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - # ["Sharp log", "{0:.3f}".format(sharp_log)], - # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - # ] - # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - # print(tabulation) - # result = { - # "Start": "{0:.2f}".format(1.), - # "End": "{0:.2f}".format(self._total_profit), - # "Sharp": "{0:.3f}".format(sharp_ratio), - # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) - # } - # return result - - # def close(self): - # plt.close() - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - # def save_rendering(self, filepath): - # plt.savefig(filepath) - - # def pause_rendering(self): - # plt.show() - - def _calculate_reward(self, action): - # rw = self.transaction_profit_reward(action) - #rw = self.reward_rr_profit_config(action) - rw = self.profit_only_when_close_reward(action) - #rw = self.profit_only_when_close_reward_aim(action) - return rw - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
- """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - # def get_trading_log_return(self): - # return self.portfolio_log_returns[self._start_tick:] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick-1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward - - # def get_bnh_log_return(self): - # return np.diff(np.log(self.prices['open'][self._start_tick:])) - - def transaction_profit_reward(self, action): - rw = 0. - - pt = self.prev_price() - pt_1 = self.current_price() - - - if self._position == Positions.Long: - a_t = 1 - elif self._position == Positions.Short: - a_t = -1 - else: - a_t = 0 - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - # close short - elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - pt_1 = self.add_buy_fee(self.current_price()) - po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - else: - rw = a_t*(pt_1 - pt)/pt - - return np.clip(rw, 0, 1) - - def profit_only_when_close_reward_aim(self, action): - - if self._last_trade_tick == None: - return 0. 
- - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. - - def profit_only_when_close_reward(self, action): - - if self._last_trade_tick == None: - return 0. - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. 
\ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py b/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py deleted file mode 100644 index 9b01579e8..000000000 --- a/freqtrade/freqai/prediction_models/RL/RLPrediction_env_TDQN_5ac.py +++ /dev/null @@ -1,671 +0,0 @@ -import logging -import random -from collections import deque -from enum import Enum -#from sklearn.decomposition import PCA, KernelPCA -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union - -import gym -import matplotlib.pylab as plt -import numpy as np -import pandas as pd -from gym import spaces -from gym.utils import seeding - - -logger = logging.getLogger(__name__) - -# from bokeh.io import output_notebook -# from bokeh.plotting import figure, show -# from bokeh.models import ( -# CustomJS, -# ColumnDataSource, -# NumeralTickFormatter, -# Span, -# HoverTool, -# Range1d, -# DatetimeTickFormatter, -# Scatter, -# Label, LabelSet -# ) - - -class Actions(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - -class DEnv(gym.Env): - - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee=0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - - def reset(self): - - self._done = False - - if self.starting_point == True: - self._position_history = (self._start_tick* [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - #self._last_trade_tick = self._current_tick - 1 - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. 
- - self.returns_report = [] - - return self._get_observation() - - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - - - - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - - temp_position = self._position - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long_buy.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short_buy.value: - self._position = Positions.Short - trade_type = "short" - elif action == Actions.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type != None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick = self._current_tick, - total_reward = self.total_reward, - total_profit = self._total_profit, - position = self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - - # def processState(self, state): - # return state.to_numpy() - - # def convert_mlp_Policy(self, obs_): - # pass - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - - def get_unrealized_profit(self): - - if self._last_trade_tick == None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price)/last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price)/last_trade_price - else: - return 0. 
- - - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or - - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) - - - def _is_trade(self, action: Actions): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) - - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - - # def render(self, mode='human'): - - # def _plot_position(position, tick): - # color = None - # if position == Positions.Short: - # color = 'red' - # elif position == Positions.Long: - # color = 'green' - # if color: - # plt.scatter(tick, self.prices.loc[tick].open, color=color) - - # if self._first_rendering: - # self._first_rendering = False - # plt.cla() - # plt.plot(self.prices) - # start_position = self._position_history[self._start_tick] - # _plot_position(start_position, self._start_tick) - - # plt.cla() - # plt.plot(self.prices) - # _plot_position(self._position, self._current_tick) - - # plt.suptitle("Total Reward: %.6f" % self.total_reward + ' ~ ' + "Total Profit: %.6f" % self._total_profit) - # plt.pause(0.01) - - - # def render_all(self): - # plt.figure() - # window_ticks = np.arange(len(self._position_history)) - # plt.plot(self.prices['open'], alpha=0.5) - - # short_ticks = [] - # long_ticks = [] - # neutral_ticks = [] - # for i, tick in enumerate(window_ticks): - # if self._position_history[i] == Positions.Short: - # short_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Long: - # long_ticks.append(tick - 1) - # elif self._position_history[i] == Positions.Neutral: - # neutral_ticks.append(tick - 1) - - # plt.plot(neutral_ticks, self.prices.loc[neutral_ticks].open, - # 'o', color='grey', ms=3, alpha=0.1) - # plt.plot(short_ticks, self.prices.loc[short_ticks].open, - # 'o', color='r', ms=3, alpha=0.8) - # plt.plot(long_ticks, self.prices.loc[long_ticks].open, - # 'o', color='g', ms=3, alpha=0.8) - - # 
plt.suptitle("Generalising") - # fig = plt.gcf() - # fig.set_size_inches(15, 10) - - - - - # def close_trade_report(self): - # small_trade = 0 - # positive_big_trade = 0 - # negative_big_trade = 0 - # small_profit = 0.003 - # for i in self.close_trade_profit: - # if i < small_profit and i > -small_profit: - # small_trade+=1 - # elif i > small_profit: - # positive_big_trade += 1 - # elif i < -small_profit: - # negative_big_trade += 1 - # print(f"small trade={small_trade/len(self.close_trade_profit)}; positive_big_trade={positive_big_trade/len(self.close_trade_profit)}; negative_big_trade={negative_big_trade/len(self.close_trade_profit)}") - - - # def report(self): - - # # get total trade - # long_trade = 0 - # short_trade = 0 - # neutral_trade = 0 - # for trade in self.trade_history: - # if trade['type'] == 'long': - # long_trade += 1 - - # elif trade['type'] == 'short': - # short_trade += 1 - # else: - # neutral_trade += 1 - - # negative_trade = 0 - # positive_trade = 0 - # for tr in self.close_trade_profit: - # if tr < 0.: - # negative_trade += 1 - - # if tr > 0.: - # positive_trade += 1 - - # total_trade_lr = negative_trade+positive_trade - - - # total_trade = long_trade + short_trade - # sharp_ratio = self.sharpe_ratio() - # sharp_log = self.get_sharpe_ratio() - - # from tabulate import tabulate - - # headers = ["Performance", ""] - # performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], - # ["Total reward", "{0:.3f}".format(self.total_reward)], - # ["Start profit(unit)", "{0:.2f}".format(1.)], - # ["End profit(unit)", "{0:.3f}".format(self._total_profit)], - # ["Sharp ratio", "{0:.3f}".format(sharp_ratio)], - # ["Sharp log", "{0:.3f}".format(sharp_log)], - # # ["Sortino ratio", "{0:.2f}".format(0) + '%'], - # ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%'] - # ] - # tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center") - # print(tabulation) - - # result = { - # "Start": "{0:.2f}".format(1.), - # "End": "{0:.2f}".format(self._total_profit), - # "Sharp": "{0:.3f}".format(sharp_ratio), - # "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr) - # } - # return result - - # def close(self): - # plt.close() - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - - # def save_rendering(self, filepath): - # plt.savefig(filepath) - - - # def pause_rendering(self): - # plt.show() - - - def _calculate_reward(self, action): - # rw = self.transaction_profit_reward(action) - #rw = self.reward_rr_profit_config(action) - #rw = self.reward_rr_profit_config(action) # main - #rw = self.profit_only_when_close_reward(action) - rw = self.profit_only_when_close_reward_aim(action) - return rw - - - def _update_profit(self, action): - #if self._is_trade(action) or self._done: - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit*pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - - def most_recent_return(self, action): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. 
- The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Short.value or action == Actions.Neutral.value: - if action == Actions.Short_buy.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - #if action == Actions.Long.value or action == Actions.Neutral.value: - if action == Actions.Long_buy.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - - def get_trading_log_return(self): - return self.portfolio_log_returns[self._start_tick:] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick-1].open - - - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward - - def get_bnh_log_return(self): - return np.diff(np.log(self.prices['open'][self._start_tick:])) - - - def transaction_profit_reward(self, action): - rw = 0. - - pt = self.prev_price() - pt_1 = self.current_price() - - - if self._position == Positions.Long: - a_t = 1 - elif self._position == Positions.Short: - a_t = -1 - else: - a_t = 0 - - # close long - if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - # close short - elif (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: - pt_1 = self.add_buy_fee(self.current_price()) - po = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - rw = a_t*(pt_1 - po)/po - #rw = rw*2 - else: - rw = a_t*(pt_1 - pt)/pt - - return np.clip(rw, 0, 1) - - - def profit_only_when_close_reward(self, action): - - if self._last_trade_tick == None: - return 0. 
- - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. - - def profit_only_when_close_reward_aim(self, action): - - if self._last_trade_tick == None: - return 0. - - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. - - def reward_rr_profit_config(self, action): - rw = 0. 
- - pt_1 = self.current_price() - - - if len(self.close_trade_profit) > 0: - # long - if self._position == Positions.Long: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions.Short_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 15 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -10 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -15 - - if action == Actions.Long_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 20 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < self.profit_aim * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -15 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -25 - - if action == Actions.Neutral.value: - if self.close_trade_profit[-1] > 0.005: - rw = 0 - elif self.close_trade_profit[-1] < 0: - rw = 0 - - # short - if self._position == Positions.Short: - pt_1 = self.add_sell_fee(self.current_price()) - po = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - - if action == Actions.Long_buy.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 15 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -10 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw =- -25 - - if action == Actions.Short_sell.value: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - rw = 20 - elif self.close_trade_profit[-1] > 0.01 and self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -1 - elif self.close_trade_profit[-1] < 0: - rw = -15 - elif self.close_trade_profit[-1] < (self.profit_aim * -1) * self.rr: - rw = -25 - - if action == Actions.Neutral.value: - if self.close_trade_profit[-1] > 0.005: - rw = 0 - elif self.close_trade_profit[-1] < 0: - rw = 0 - - return np.clip(rw, 0, 1) diff --git a/freqtrade/freqai/prediction_models/RL/config.py b/freqtrade/freqai/prediction_models/RL/config.py deleted file mode 100644 index c45eb2387..000000000 --- a/freqtrade/freqai/prediction_models/RL/config.py +++ /dev/null @@ -1,37 +0,0 @@ -# dir -DATA_SAVE_DIR = "datasets" -TRAINED_MODEL_DIR = "trained_models" -TENSORBOARD_LOG_DIR = "tensorboard_log" -RESULTS_DIR = "results" - -# Model Parameters -A2C_PARAMS = {"n_steps": 5, "ent_coef": 0.01, "learning_rate": 0.0007} -PPO_PARAMS = { - "n_steps": 2048, - "ent_coef": 0.01, - "learning_rate": 0.00025, - "batch_size": 64, -} -DDPG_PARAMS = {"batch_size": 128, "buffer_size": 50000, "learning_rate": 0.001} -TD3_PARAMS = { - "batch_size": 100, - "buffer_size": 1000000, - "learning_rate": 0.001, -} -SAC_PARAMS = { - "batch_size": 64, - "buffer_size": 100000, - "learning_rate": 0.0001, - "learning_starts": 100, - "ent_coef": "auto_0.1", -} -ERL_PARAMS = { - "learning_rate": 3e-5, - "batch_size": 2048, - "gamma": 0.985, - "seed": 312, - "net_dimension": 512, - "target_step": 5000, - "eval_gap": 30, -} -RLlib_PARAMS = {"lr": 5e-5, "train_batch_size": 500, "gamma": 0.99} diff --git a/freqtrade/freqai/prediction_models/RLPredictionModel.py b/freqtrade/freqai/prediction_models/RLPredictionModel.py deleted file mode 100644 index b6903dd43..000000000 --- a/freqtrade/freqai/prediction_models/RLPredictionModel.py 
+++ /dev/null @@ -1,253 +0,0 @@ -import logging -from typing import Any, Dict, Tuple -#from matplotlib.colors import DivergingNorm - -from pandas import DataFrame -import pandas as pd -from freqtrade.exceptions import OperationalException -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import tensorflow as tf -from freqtrade.freqai.prediction_models.BaseTensorFlowModel import BaseTensorFlowModel -from freqtrade.freqai.freqai_interface import IFreqaiModel -from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout -from tensorflow.keras.models import Model -import numpy as np -import copy - -from keras.layers import * -import random - - -logger = logging.getLogger(__name__) - -# tf.config.run_functions_eagerly(True) -# tf.data.experimental.enable_debug_mode() - -import os -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - -MAX_EPOCHS = 10 -LOOKBACK = 8 - - -class RLPredictionModel_v2(IFreqaiModel): - """ - User created prediction model. The class needs to override three necessary - functions, predict(), fit(). - """ - - def fit(self, data_dictionary: Dict, pair) -> Any: - """ - User sets up the training and test data to fit their desired model here - :params: - :data_dictionary: the dictionary constructed by DataHandler to hold - all the training and test data/labels. - """ - - train_df = data_dictionary["train_features"] - train_labels = data_dictionary["train_labels"] - test_df = data_dictionary["test_features"] - test_labels = data_dictionary["test_labels"] - n_labels = len(train_labels.columns) - if n_labels > 1: - raise OperationalException( - "Neural Net not yet configured for multi-targets. Please " - " reduce number of targets to 1 in strategy." - ) - - n_features = len(data_dictionary["train_features"].columns) - BATCH_SIZE = self.freqai_info.get("batch_size", 64) - input_dims = [BATCH_SIZE, self.CONV_WIDTH, n_features] - - - w1 = WindowGenerator( - input_width=self.CONV_WIDTH, - label_width=1, - shift=1, - train_df=train_df, - val_df=test_df, - train_labels=train_labels, - val_labels=test_labels, - batch_size=BATCH_SIZE, - ) - - - # train_agent() - #pair = self.dd.historical_data[pair] - #gym_env = FreqtradeEnv(data=train_df, prices=0.01, windows_size=100, pair=pair, stake_amount=100) - - # sep = '/' - # coin = pair.split(sep, 1)[0] - - # # df1 = train_df.filter(regex='price') - # # df2 = df1.filter(regex='raw') - - # # df3 = df2.filter(regex=f"{coin}") - # # print(df3) - - # price = train_df[f"%-{coin}raw_price_5m"] - # gym_env = RLPrediction_GymAnytrading(signal_features=train_df, prices=price, window_size=100) - # sac = RLPrediction_Agent(gym_env) - - # print(sac) - - # return 0 - - - - return model - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first=True - ) -> Tuple[DataFrame, DataFrame]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :predictions: np.array of predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - if first: - full_df = dk.data_dictionary["prediction_features"] - - w1 = WindowGenerator( - input_width=self.CONV_WIDTH, - label_width=1, - shift=1, - test_df=full_df, - batch_size=len(full_df), - ) - - predictions = self.model.predict(w1.inference) - len_diff = len(dk.do_predict) - len(predictions) - if len_diff > 0: - dk.do_predict = dk.do_predict[len_diff:] - - else: - data = dk.data_dictionary["prediction_features"] - data = tf.expand_dims(data, axis=0) - predictions = self.model(data, training=False) - - predictions = predictions[:, 0] - pred_df = DataFrame(predictions, columns=dk.label_list) - - pred_df = dk.denormalize_labels_from_metadata(pred_df) - - return (pred_df, np.ones(len(pred_df))) - - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pass - # w1 = WindowGenerator( - # input_width=self.CONV_WIDTH, label_width=1, shift=1, test_df=df, batch_size=len(df) - # ) - - # trained_predictions = model.predict(w1.inference) - # #trained_predictions = trained_predictions[:, 0, 0] - # trained_predictions = trained_predictions[:, 0] - - # n_lost_points = len(df) - len(trained_predictions) - # pred_df = DataFrame(trained_predictions, columns=dk.label_list) - # zeros_df = DataFrame(np.zeros((n_lost_points, len(dk.label_list))), columns=dk.label_list) - # pred_df = pd.concat([zeros_df, pred_df], axis=0) - - # pred_df = dk.denormalize_labels_from_metadata(pred_df) - - - - # self.dd.historic_predictions[pair] = DataFrame() - # self.dd.historic_predictions[pair] = copy.deepcopy(pred_df) - - -class WindowGenerator: - def __init__( - self, - input_width, - label_width, - shift, - train_df=None, - val_df=None, - test_df=None, - train_labels=None, - val_labels=None, - test_labels=None, - batch_size=None, - ): - # Store the raw data. 
- self.train_df = train_df - self.val_df = val_df - self.test_df = test_df - self.train_labels = train_labels - self.val_labels = val_labels - self.test_labels = test_labels - self.batch_size = batch_size - self.input_width = input_width - self.label_width = label_width - self.shift = shift - - self.total_window_size = input_width + shift - - self.input_slice = slice(0, input_width) - self.input_indices = np.arange(self.total_window_size)[self.input_slice] - - def make_dataset(self, data, labels=None): - data = np.array(data, dtype=np.float32) - if labels is not None: - labels = np.array(labels, dtype=np.float32) - ds = tf.keras.preprocessing.timeseries_dataset_from_array( - data=data, - targets=labels, - sequence_length=self.total_window_size, - sequence_stride=1, - sampling_rate=1, - shuffle=False, - batch_size=self.batch_size, - ) - - return ds - - @property - def train(self): - - - - return self.make_dataset(self.train_df, self.train_labels) - - @property - def val(self): - return self.make_dataset(self.val_df, self.val_labels) - - @property - def test(self): - return self.make_dataset(self.test_df, self.test_labels) - - @property - def inference(self): - return self.make_dataset(self.test_df) - - @property - def example(self): - """Get and cache an example batch of `inputs, labels` for plotting.""" - result = getattr(self, "_example", None) - if result is None: - # No example batch was found, so get one from the `.train` dataset - result = next(iter(self.train)) - # And cache it for next time - self._example = result - return result \ No newline at end of file diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearning.py b/freqtrade/freqai/prediction_models/ReinforcementLearning.py deleted file mode 100644 index 6ced4749e..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearning.py +++ /dev/null @@ -1,273 +0,0 @@ -import logging -from typing import Any, Dict, Tuple - -import numpy as np -import numpy.typing as npt -import pandas as pd -import torch as th -from pandas import DataFrame -from stable_baselines3 import PPO -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor -from stable_baselines3.common.vec_env import SubprocVecEnv - -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.prediction_models.RL.RLPrediction_agent_TDQN import TDQN -from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_5ac import DEnv -#from freqtrade.freqai.prediction_models.RL.RLPrediction_env_TDQN_3ac import DEnv -from freqtrade.persistence import Trade - -logger = logging.getLogger(__name__) - -class ReinforcementLearning(IFreqaiModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen - ) -> Any: - """ - Filter the training data and train a model to it. Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. - :param unfiltered_dataframe: Full dataframe for the current training period - :param metadata: pair metadata from strategy. 
- :returns: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info("--------------------Starting training " f"{pair} --------------------") - - # filter the features requested by user in the configuration file and elegantly handle NaNs - features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( - features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods - - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" - ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - - model = self.fit(data_dictionary, pair) - - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - - logger.info(f"--------------------done training {pair}--------------------") - - return model - - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): - - # train_df = data_dictionary["train_features"] - # # train_labels = data_dictionary["train_labels"] - # test_df = data_dictionary["test_features"] - # # test_labels = data_dictionary["test_labels"] - # # sep = '/' - # # coin = pair.split(sep, 1)[0] - # # price = train_df[f"%-{coin}raw_price_{self.config['timeframe']}"] - # # price.reset_index(inplace=True, drop=True) - # # price = price.to_frame() - # price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - # price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - # #train_env = GymAnytrading(train_df, price, self.CONV_WIDTH) - # agent_params = self.freqai_info['model_training_parameters'] - # reward_params = self.freqai_info['model_reward_parameters'] - # train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - # #eval_env = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - # #env_instance = SubprocVecEnv([DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params)]) - # #train_env.reset() - # #eval_env.reset() - # # model - # #policy_kwargs = dict(net_arch=[512, 512, 512]) - # policy_kwargs = dict(activation_fn=th.nn.Tanh, - # net_arch=[256, 256, 256]) - # agent = RLPrediction_agent(train_env) - # #eval_agent = RLPrediction_agent(eval_env) - - # # PPO - # model_name = 'ppo' - # model = agent.get_model(model_name, model_kwargs=agent_params, policy_kwargs=policy_kwargs) - # trained_model = agent.train_model(model=model, - # tb_log_name=model_name, - # model_kwargs=agent_params, - # train_df=train_df, - # test_df=test_df, - # price=price, - # price_test=price_test, - # window_size=self.CONV_WIDTH) - # # best_model = eval_agent.train_model(model=model, - # # tb_log_name=model_name, - # # model_kwargs=agent_params, - # # eval=eval_env) - # # TDQN - # # model_name = 'TDQN' - # # model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log='./tensorboard_log/', - # # learning_rate=agent_params["learning_rate"], gamma=0.9, - # # 
target_update_interval=5000, buffer_size=50000, - # # exploration_initial_eps=1, exploration_final_eps=0.1, - # # replay_buffer_class=ReplayBuffer - # # ) - # # trained_model = agent.train_model(model=model, - # # tb_log_name=model_name, - # # model_kwargs=agent_params) - # #model.learn( - # # total_timesteps=5000, - # # callback=callback - # # ) - - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) - - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(test_df.index)) - - # environments - train_env = DEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval = DEnv(df=test_df, prices=price_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval_env = Monitor(eval, ".") - eval_env.reset() - - # this should be in config - TODO - agent_type = 'tdqn' - - path = self.dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/{agent_type}/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) - - if agent_type == 'tdqn': - model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", - learning_rate=0.00025, gamma=0.9, - target_update_interval=5000, buffer_size=50000, - exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=ReplayBuffer - ) - elif agent_type == 'ppo': - model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/{agent_type}/tensorboard/", - learning_rate=0.00025, gamma=0.9 - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - print('Training finished!') - - return model - - - - def get_state_info(self, pair): - open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) - market_side = 0.5 - current_profit = 0 - for trade in open_trades: - if trade.pair == pair: - current_value = trade.open_trade_value - openrate = trade.open_rate - if 'long' in trade.enter_tag: - market_side = 1 - else: - market_side = 0 - current_profit = current_value / openrate -1 - - total_profit = 0 - closed_trades = Trade.get_trades(trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) - for trade in closed_trades: - total_profit += trade.close_profit - - return market_side, current_profit, total_profit - - - def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False - ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: - """ - Filter the prediction features data and predict with it. - :param: unfiltered_dataframe: Full dataframe for the current backtest period. 
- :return: - :pred_df: dataframe containing the predictions - :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove - data (NaNs) or felt uncertain about data (PCA and DI index) - """ - - dk.find_features(unfiltered_dataframe) - filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False - ) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) - - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) - pred_df.fillna(0, inplace=True) - - return (pred_df, dk.do_predict) - - def rl_model_predict(self, dataframe: DataFrame, - dk: FreqaiDataKitchen, model: Any) -> DataFrame: - - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) - - def _predict(window): - observations = dataframe.iloc[window.index] - res, _ = model.predict(observations, deterministic=True) - return res - - output = output.rolling(window=self.CONV_WIDTH).apply(_predict) - - return output - - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py new file mode 100644 index 000000000..2fa87c432 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -0,0 +1,155 @@ +import logging +from typing import Any, Dict # , Tuple + +import numpy as np +# import numpy.typing as npt +# import pandas as pd +import torch as th +# from pandas import DataFrame +from stable_baselines3 import PPO +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +# from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningPPO(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + # environments + train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + reward_kwargs=reward_params) + eval = MyRLEnv(df=test_df, prices=price_test, + window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval_env = Monitor(eval, ".") + eval_env.reset() + + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9 + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + print('Training finished!') + + return model + + +class MyRLEnv(BaseRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. 
+ + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py new file mode 100644 index 000000000..a022a10ba --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -0,0 +1,168 @@ +import logging +from typing import Any, Dict, Optional + +import numpy as np +import torch as th +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +# from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.RL.TDQNagent import TDQN +from stable_baselines3.common.buffers import ReplayBuffer + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearningPPO(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + # environments + train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + reward_kwargs=reward_params) + eval = MyRLEnv(df=test_df, prices=price_test, + window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + eval_env = Monitor(eval, ".") + eval_env.reset() + + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = TDQN('TMultiInputPolicy', train_env, + policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/tdqn/tensorboard/", + learning_rate=0.00025, gamma=0.9, + target_update_interval=5000, buffer_size=50000, + exploration_initial_eps=1, exploration_final_eps=0.1, + replay_buffer_class=Optional(ReplayBuffer) + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + print('Training finished!') + + return model + + +class MyRLEnv(BaseRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def 
step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self._calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. 
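Both new prediction models read their run parameters from the freqai configuration at fit() time. Below is a minimal sketch of the keys consumed in the code above (train_cycles and eval_cycles under model_training_parameters; rr and profit_aim under model_reward_parameters); the surrounding structure and the numeric values are illustrative assumptions, not a definitive schema:

# Illustrative config fragment for the RL prediction models above.
# Key names are taken from fit(); the values shown are placeholder assumptions.
rl_config_sketch = {
    "timeframe": "5m",  # fit() tails historic_data[pair][timeframe] for train/test prices
    "freqai": {
        "model_training_parameters": {
            "train_cycles": 10,  # total_timesteps = train_cycles * len(train_df)
            "eval_cycles": 3,    # eval_freq = eval_cycles * len(test_df)
        },
        "model_reward_parameters": {
            "rr": 1,             # reward threshold factor: profit_aim * rr
            "profit_aim": 0.02,  # target compared against close_trade_profit
        },
    },
}

With a section like this in place, total training length scales with the size of the training window, since fit() multiplies the cycle counts by the train/test dataframe lengths.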
diff --git a/freqtrade/freqai/rl/BaseRLAgent.py b/freqtrade/freqai/rl/BaseRLAgent.py deleted file mode 100644 index e69de29bb..000000000 From d4db5c32812e65d93fde173cd61e57d1a8a035f1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 10:29:33 +0200 Subject: [PATCH 012/232] ensure TDQN class is properly named --- freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index a022a10ba..f042762e4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -15,7 +15,7 @@ from stable_baselines3.common.buffers import ReplayBuffer logger = logging.getLogger(__name__) -class ReinforcementLearningPPO(BaseReinforcementLearningModel): +class ReinforcementLearningTDQN(BaseReinforcementLearningModel): """ User created Reinforcement Learning Model prediction model. """ From 6048f60f13195a9db5488c06436d49921156d87d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 11:11:16 +0200 Subject: [PATCH 013/232] get TDQN working with 5 action environment --- .../ReinforcementLearningTDQN.py | 201 +++++++++++++++--- 1 file changed, 168 insertions(+), 33 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index f042762e4..5ec917719 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -1,16 +1,17 @@ import logging -from typing import Any, Dict, Optional - +from typing import Any, Dict # Optional +from enum import Enum import numpy as np import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor # from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer - +from gym import spaces +from gym.utils import seeding logger = logging.getLogger(__name__) @@ -57,7 +58,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): learning_rate=0.00025, gamma=0.9, target_update_interval=5000, buffer_size=50000, exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=Optional(ReplayBuffer) + replay_buffer_class=ReplayBuffer ) model.learn( @@ -70,11 +71,102 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): return model +class Actions(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + class MyRLEnv(BaseRLEnv): """ - User can override any function in BaseRLEnv and gym.Env + User can override any function in BaseRLEnv and gym.Env. Here the user + Adds 5 actions. 
""" + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + def step(self, action): self._done = False self._current_tick += 1 @@ -85,11 +177,12 @@ class MyRLEnv(BaseRLEnv): self.update_portfolio_log_returns(action) self._update_profit(action) - step_reward = self._calculate_reward(action) + step_reward = self.calculate_reward(action) self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position """ Action: Neutral, position: Long -> Close Long Action: Neutral, position: Short -> Close Short @@ -104,12 +197,18 @@ class MyRLEnv(BaseRLEnv): if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long.value: + elif action == Actions.Long_buy.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short.value: + elif action == Actions.Short_buy.value: self._position = Positions.Short trade_type = "short" + elif action == Actions.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" else: print("case not defined") @@ -136,33 +235,69 @@ class MyRLEnv(BaseRLEnv): return observation, step_reward, self._done, info - def calculate_reward(self, action): + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): if self._last_trade_tick is None: return 0. 
- # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or - return 0. 
+ (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) + + def _is_trade(self, action): + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) From 9c78e6c26f39e903c1bc899d1b3bbc8cc9f4e09a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 11:24:57 +0200 Subject: [PATCH 014/232] base PPO model only customizes reward for 3AC --- .../ReinforcementLearningPPO.py | 63 +------------------ 1 file changed, 1 insertion(+), 62 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 2fa87c432..4d995c4e3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -28,7 +28,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) + eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) # price data for model training and evaluation @@ -72,67 +72,6 @@ class MyRLEnv(BaseRLEnv): User can override any function in BaseRLEnv and gym.Env """ - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - 
self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - def calculate_reward(self, action): if self._last_trade_tick is None: From 718c9d044010470fc8be207608c4ad2ebf29fd1c Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 12:29:44 +0300 Subject: [PATCH 015/232] action fix --- .../ReinforcementLearningTDQN.py | 69 +++++++------------ 1 file changed, 26 insertions(+), 43 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 5ec917719..8f5fe4e03 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -72,11 +72,9 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): class Actions(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 + Short = 0 + Long = 1 + Neutral = 2 class Positions(Enum): @@ -181,36 +179,31 @@ class MyRLEnv(BaseRLEnv): self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade + if self.is_tradesignal(action): # exclude 3 case not trade # Update position """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short + + Action: Short, position: Neutral -> Open Short Action: Short, position: Long -> Close Long and Open Short """ - + + temp_position = self._position if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long_buy.value: + elif action == Actions.Long.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short_buy.value: + elif action == Actions.Short.value: self._position = Positions.Short trade_type = "short" - elif action == Actions.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" else: - print("case not defined") + print("case not define") # Update last trade tick self._last_trade_tick = self._current_tick @@ -257,33 +250,23 @@ class MyRLEnv(BaseRLEnv): return 0. 
def is_tradesignal(self, action): - # trade signal + # trade signal """ not trade signal is : - Action: Neutral, position: Neutral -> Nothing + Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) - - def _is_trade(self, action): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) + def _is_trade(self, action: Actions): + return ((action == Actions.Long.value and self._position == Positions.Short) or + (action == Actions.Short.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) + ) def is_hold(self, action): return ((action == Actions.Short.value and self._position == Positions.Short) From 096533bcb9e3f2c3685986c00ffbecd2eddb0f18 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 12:45:08 +0300 Subject: [PATCH 016/232] 3ac to 5ac --- .../ReinforcementLearningTDQN.py | 69 ++++++++++++------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 8f5fe4e03..5ec917719 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -72,9 +72,11 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 class Positions(Enum): @@ -179,31 +181,36 @@ class MyRLEnv(BaseRLEnv): self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade + if self.is_tradesignal(action): # exclude 3 case not trade # Update position """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, 
position: Neutral -> Open Long Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short + + Action: Short, position: Neutral -> Open Short Action: Short, position: Long -> Close Long and Open Short """ - - temp_position = self._position + if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long.value: + elif action == Actions.Long_buy.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short.value: + elif action == Actions.Short_buy.value: self._position = Positions.Short trade_type = "short" + elif action == Actions.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" else: - print("case not define") + print("case not defined") # Update last trade tick self._last_trade_tick = self._current_tick @@ -250,23 +257,33 @@ class MyRLEnv(BaseRLEnv): return 0. def is_tradesignal(self, action): - # trade signal + # trade signal """ not trade signal is : - Action: Neutral, position: Neutral -> Nothing + Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) + + def _is_trade(self, action): + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) + ) def is_hold(self, action): return ((action == Actions.Short.value and self._position == Positions.Short) From 926023935f52a69d7e8830843729978121b68bc3 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 12:13:37 +0200 Subject: [PATCH 017/232] make base 3ac and base 5ac environments. TDQN defaults to 3AC. 
--- .../RL/{BaseRLEnv.py => Base3ActionRLEnv.py} | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 364 ++++++++++++++++++ .../RL/BaseReinforcementLearningModel.py | 4 +- .../ReinforcementLearningPPO.py | 4 +- .../ReinforcementLearningTDQN.py | 266 +++---------- 5 files changed, 417 insertions(+), 223 deletions(-) rename freqtrade/freqai/RL/{BaseRLEnv.py => Base3ActionRLEnv.py} (99%) create mode 100644 freqtrade/freqai/RL/Base5ActionRLEnv.py diff --git a/freqtrade/freqai/RL/BaseRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py similarity index 99% rename from freqtrade/freqai/RL/BaseRLEnv.py rename to freqtrade/freqai/RL/Base3ActionRLEnv.py index 607262acd..443ce7025 100644 --- a/freqtrade/freqai/RL/BaseRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -31,7 +31,7 @@ def mean_over_std(x): return mean / std if std > 0 else 0 -class BaseRLEnv(gym.Env): +class Base3ActionRLEnv(gym.Env): metadata = {'render.modes': ['human']} diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py new file mode 100644 index 000000000..01fb77481 --- /dev/null +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -0,0 +1,364 @@ +import logging +from enum import Enum +# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + +import gym +import numpy as np +from gym import spaces +from gym.utils import seeding + +logger = logging.getLogger(__name__) + + +class Actions(Enum): + Neutral = 0 + Long_buy = 1 + Long_sell = 2 + Short_buy = 3 + Short_sell = 4 + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + + +class Base5ActionRLEnv(gym.Env): + """ + Base class for a 5 action environment + """ + metadata = {'render.modes': ['human']} + + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + assert df.ndim == 2 + + self.seed() + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1]) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick = self.window_size + self._end_tick = len(self.prices) - 1 + self._done = None + self._current_tick = None + self._last_trade_tick = None + self._position = Positions.Neutral + self._position_history = None + self.total_reward = None + self._total_profit = None + self._first_rendering = None + self.history = None + self.trade_history = [] + + # self.A_t, self.B_t = 0.000639, 0.00001954 + self.r_t_change = 0. + + self.returns_report = [] + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. 
# unit + self._first_rendering = True + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self.r_t_change = 0. + + self.returns_report = [] + + return self._get_observation() + + def step(self, action): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self.calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): # exclude 3 case not trade + # Update position + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long_buy.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short_buy.value: + self._position = Positions.Short + trade_type = "short" + elif action == Actions.Long_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Short_sell.value: + self._position = Positions.Neutral + trade_type = "neutral" + else: + print("case not defined") + + # Update last trade tick + self._last_trade_tick = self._current_tick + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 0.2: + self._done = True + + self._position_history.append(self._position) + observation = self._get_observation() + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + self._update_history(info) + + return observation, step_reward, self._done, info + + # def processState(self, state): + # return state.to_numpy() + + # def convert_mlp_Policy(self, obs_): + # pass + + def _get_observation(self): + return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. 
+ + def is_tradesignal(self, action): + # trade signal + """ + not trade signal is : + Action: Neutral, position: Neutral -> Nothing + Action: Long, position: Long -> Hold Long + Action: Short, position: Short -> Hold Short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Long) or + + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Short)) + + def _is_trade(self, action: Actions): + return ((action == Actions.Long_buy.value and self._position == Positions.Short) or + (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + + (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or + (action == Actions.Neutral.Long_sell and self._position == Positions.Short) + ) + + def is_hold(self, action): + return ((action == Actions.Short.value and self._position == Positions.Short) + or (action == Actions.Long.value and self._position == Positions.Long)) + + def add_buy_fee(self, price): + return price * (1 + self.fee) + + def add_sell_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + def _update_profit(self, action): + # if self._is_trade(action) or self._done: + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position == Positions.Long: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + if self._position == Positions.Short: + self._total_profit = self._total_profit + self._total_profit * pnl + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action): + """ + We support Long, Neutral and Short positions. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + # if action == Actions.Short.value or action == Actions.Neutral.value: + if action == Actions.Short_buy.value or action == Actions.Neutral.value: + current_price = self.add_sell_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_buy_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + # if action == Actions.Long.value or action == Actions.Neutral.value: + if action == Actions.Long_buy.value or action == Actions.Neutral.value: + current_price = self.add_buy_fee(current_price) + + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_sell_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def get_trading_log_return(self): + return self.portfolio_log_returns[self._start_tick:] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward + + def get_bnh_log_return(self): + return np.diff(np.log(self.prices['open'][self._start_tick:])) + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. 
+ + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index accddc94d..a28b88c42 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -8,7 +8,7 @@ from pandas import DataFrame from abc import abstractmethod from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade logger = logging.getLogger(__name__) @@ -165,7 +165,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): hist_preds_df[return_str] = 0 -class MyRLEnv(BaseRLEnv): +class MyRLEnv(Base3ActionRLEnv): def step(self, action): self._done = False diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 4d995c4e3..cc56852df 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -10,7 +10,7 @@ from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor # from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel @@ -67,7 +67,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): return model -class MyRLEnv(BaseRLEnv): +class MyRLEnv(Base3ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 5ec917719..2a8570d3e 100644 --- 
a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -1,17 +1,14 @@ import logging from typing import Any, Dict # Optional -from enum import Enum -import numpy as np import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor # from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.BaseRLEnv import BaseRLEnv +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer -from gym import spaces -from gym.utils import seeding +import numpy as np logger = logging.getLogger(__name__) @@ -71,233 +68,66 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): return model -class Actions(Enum): - Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 - - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - - -class MyRLEnv(BaseRLEnv): +class MyRLEnv(Base3ActionRLEnv): """ - User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. + User can override any function in BaseRLEnv and gym.Env """ - metadata = {'render.modes': ['human']} - - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): - assert df.ndim == 2 - - self.seed() - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee = 0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1]) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - - def seed(self, seed=None): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self.r_t_change = 0. 
- - self.returns_report = [] - - return self._get_observation() - - def step(self, action): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self.calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long_buy.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short_buy.value: - self._position = Positions.Short - trade_type = "short" - elif action == Actions.Long_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Short_sell.value: - self._position = Positions.Neutral - trade_type = "neutral" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] - - def get_unrealized_profit(self): + def calculate_reward(self, action): if self._last_trade_tick is None: return 0. - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. 
+ current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) - def is_tradesignal(self, action): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) + return 0. - def _is_trade(self, action): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or +# User can inherit and customize 5 action environment +# class MyRLEnv(Base5ActionRLEnv): +# """ +# User can override any function in BaseRLEnv and gym.Env. Here the user +# Adds 5 actions. +# """ - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) +# def calculate_reward(self, action): - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) +# if self._last_trade_tick is None: +# return 0. 
- def add_buy_fee(self, price): - return price * (1 + self.fee) +# # close long +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(current_price) - np.log(last_trade_price)) - def add_sell_fee(self, price): - return price / (1 + self.fee) +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(current_price) - np.log(last_trade_price)) * 2) - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} +# # close short +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(last_trade_price) - np.log(current_price)) - for key, value in info.items(): - self.history[key].append(value) +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(last_trade_price) - np.log(current_price)) * 2) + +# return 0. From 13cd18dc9a84e36fbc11ed2d17a2a65ebdaecdb5 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:05:01 +0300 Subject: [PATCH 018/232] PPO policy change + verbose=1 --- .../freqai/prediction_models/ReinforcementLearningPPO.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index cc56852df..5bc33bff1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -53,8 +53,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128]) - model = PPO('MultiInputPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9 + model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 ) model.learn( From 1c81ec601683205460ccf1cd86b2522249e0d255 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:20:57 +0300 Subject: [PATCH 019/232] 3ac and 5ac example strategies --- ....py => ReinforcementLearningExample3ac.py} | 2 +- .../ReinforcementLearningExample5ac.py | 147 ++++++++++++++++++ 2 files changed, 148 insertions(+), 1 deletion(-) rename freqtrade/freqai/example_strats/{ReinforcementLearningExample.py => ReinforcementLearningExample3ac.py} (99%) create mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py similarity index 99% rename 
from freqtrade/freqai/example_strats/ReinforcementLearningExample.py rename to freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 1bafdbb80..8473fc6a9 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class ReinforcementLearningExample(IStrategy): +class RLExample3ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py new file mode 100644 index 000000000..1da9a8ab1 --- /dev/null +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -0,0 +1,147 @@ +import logging +from functools import reduce + +import pandas as pd +import talib.abstract as ta +from pandas import DataFrame + +from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair + + +logger = logging.getLogger(__name__) + + +class RLExample5ac(IStrategy): + """ + Test strategy - used for testing freqAI functionalities. + DO not use in production. + """ + + minimal_roi = {"0": 0.1, "240": -1} + + plot_config = { + "main_plot": {}, + "subplots": { + "prediction": {"prediction": {"color": "blue"}}, + "target_roi": { + "target_roi": {"color": "brown"}, + }, + "do_predict": { + "do_predict": {"color": "brown"}, + }, + }, + } + + process_only_new_candles = True + stoploss = -0.05 + use_exit_signal = True + startup_candle_count: int = 300 + can_short = False + + linear_roi_offset = DecimalParameter( + 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True + ) + max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) + + def informative_pairs(self): + whitelist_pairs = self.dp.current_whitelist() + corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] + informative_pairs = [] + for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: + for pair in whitelist_pairs: + informative_pairs.append((pair, tf)) + for pair in corr_pairs: + if pair in whitelist_pairs: + continue # avoid duplication + informative_pairs.append((pair, tf)) + return informative_pairs + + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + with self.freqai.lock: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] + + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in 
range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 + + return df + + def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + + self.freqai_info = self.config["freqai"] + + dataframe = self.freqai.start(dataframe, metadata, self) + + return dataframe + + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df From cf0731095f91fdec5adc84087f10404baf009c60 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:23:00 +0300 Subject: [PATCH 020/232] type fix --- .../freqai/example_strats/ReinforcementLearningExample3ac.py | 2 +- .../freqai/example_strats/ReinforcementLearningExample5ac.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 8473fc6a9..2173f3d2f 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class RLExample3ac(IStrategy): +class ReinforcementLearningExample3ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. 
diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 1da9a8ab1..bf0d91390 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class RLExample5ac(IStrategy): +class ReinforcementLearningExample5ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. From acf3484e8857854d37deefd30fd631b3dbcf336c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 13:46:12 +0200 Subject: [PATCH 021/232] add multiprocessing variant of ReinforcementLearningPPO --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 6 +- .../ReinforcementLearningPPO_multiproc.py | 114 ++++++++++++++++++ 2 files changed, 118 insertions(+), 2 deletions(-) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 443ce7025..5e8bff024 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -35,10 +35,12 @@ class Base3ActionRLEnv(gym.Env): metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 - self.seed() + self.id = id + self.seed(seed) self.df = df self.signal_features = self.df self.prices = prices diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py new file mode 100644 index 000000000..1b2873334 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -0,0 +1,114 @@ +import logging +from typing import Any, Dict # , Tuple + +import numpy as np +# import numpy.typing as npt +# import pandas as pd +import torch as th +# from pandas import DataFrame +from typing import Callable +from stable_baselines3 import PPO +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.utils import set_random_seed +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +import gym +logger = logging.getLogger(__name__) + + +def make_env(env_id: str, rank: int, seed: int, train_df, price, + reward_params, window_size) -> Callable: + """ + Utility function for multiprocessed env. + + :param env_id: (str) the environment ID + :param num_env: (int) the number of environment you wish to have in subprocesses + :param seed: (int) the inital seed for RNG + :param rank: (int) index of the subprocess + :return: (Callable) + """ + def _init() -> gym.Env: + + env = MyRLEnv(df=train_df, prices=price, window_size=window_size, + reward_kwargs=reward_params, id=env_id, seed=seed + rank) + return env + set_random_seed(seed) + return _init + + +class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + env_id = "CartPole-v1" + num_cpu = 4 + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + self.CONV_WIDTH) for i in range(num_cpu)]) + + eval_env = SubprocVecEnv([make_env(env_id, i, 1, test_df, price_test, reward_params, + self.CONV_WIDTH) for i in range(num_cpu)]) + + path = self.dk.data_path + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + print('Training finished!') + + return model + + +class MyRLEnv(Base3ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. 
From 6d8e838a8f1443c7f915cd6283850b134c36a7d9 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Mon, 15 Aug 2022 22:07:42 +0700 Subject: [PATCH 022/232] update tensorboard dependency --- requirements-freqai.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 8d8bb03c5..869606365 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -8,4 +8,5 @@ catboost==1.0.6; platform_machine != 'aarch64' lightgbm==3.3.2 torch==1.12.1 stable-baselines3==1.5.0 -gym==0.21.0 \ No newline at end of file +gym==0.21.0 +tensorboard==2.9.1 \ No newline at end of file From b1fc5a06ca0265203b53296069f25b1bffce1891 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:33:08 +0300 Subject: [PATCH 023/232] example config added --- .../config_reinforcementlearning_example.json | 110 ++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 config_examples/config_reinforcementlearning_example.json diff --git a/config_examples/config_reinforcementlearning_example.json b/config_examples/config_reinforcementlearning_example.json new file mode 100644 index 000000000..89b33653d --- /dev/null +++ b/config_examples/config_reinforcementlearning_example.json @@ -0,0 +1,110 @@ +{ + "trading_mode": "futures", + "new_pairs_days": 30, + "margin_mode": "isolated", + "max_open_trades": 8, + "stake_currency": "USDT", + "stake_amount": 1000, + "tradable_balance_ratio": 1, + "fiat_display_currency": "USD", + "dry_run": true, + "timeframe": "3m", + "dataformat_ohlcv": "json", + "dry_run_wallet": 12000, + "cancel_open_orders_on_exit": true, + "unfilledtimeout": { + "entry": 10, + "exit": 30 + }, + "exchange": { + "name": "binance", + "key": "", + "secret": "", + "ccxt_config": { + "enableRateLimit": true + }, + "ccxt_async_config": { + "enableRateLimit": true, + "rateLimit": 200 + }, + "pair_whitelist": [ + "1INCH/USDT", + "AAVE/USDT" + ], + "pair_blacklist": [] + }, + "entry_pricing": { + "price_side": "same", + "purge_old_models": true, + "use_order_book": true, + "order_book_top": 1, + "price_last_balance": 0.0, + "check_depth_of_market": { + "enabled": false, + "bids_to_ask_delta": 1 + } + }, + "exit_pricing": { + "price_side": "other", + "use_order_book": true, + "order_book_top": 1 + }, + "pairlists": [ + { + "method": "StaticPairList" + } + ], + "freqai": { + "model_save_type": "stable_baselines", + "conv_width": 10, + "follow_mode": false, + "purge_old_models": true, + "expiration_hours": 1, + "train_period_days": 10, + "backtest_period_days": 2, + "identifier": "test_rl9", + "feature_parameters": { + "include_corr_pairlist": [ + "BTC/USDT", + "ETH/USDT" + ], + "include_timeframes": [ + "3m", + "15m" + ], + "label_period_candles": 80, + "include_shifted_candles": 0, + "DI_threshold": 0, + "weight_factor": 0.9, + "principal_component_analysis": false, + "use_SVM_to_remove_outliers": false, + "svm_params": {"shuffle": true, "nu": 0.1}, + "stratify_training_data": 0, + "indicator_max_period_candles": 10, + "indicator_periods_candles": [5] + }, + "data_split_parameters": { + "test_size": 0.5, + "random_state": 1, + "shuffle": false + }, + "model_training_parameters": { + "n_steps": 2048, + "ent_coef": 0.005, + "learning_rate": 0.000025, + "batch_size": 256, + "eval_cycles" : 5, + "train_cycles" : 15 + }, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.01 + } + }, + "bot_name": "RL_test", + "force_entry_enable": true, + "initial_state": "running", + "internals": { + "process_throttle_secs": 5 + } +} \ No 
newline at end of file From 48bb51b458dfd02dff006c866e9b96ddf070ec06 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 14:41:24 +0300 Subject: [PATCH 024/232] example config added --- config_examples/config_reinforcementlearning_example.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/config_examples/config_reinforcementlearning_example.json b/config_examples/config_reinforcementlearning_example.json index 89b33653d..29f088ef3 100644 --- a/config_examples/config_reinforcementlearning_example.json +++ b/config_examples/config_reinforcementlearning_example.json @@ -8,7 +8,7 @@ "tradable_balance_ratio": 1, "fiat_display_currency": "USD", "dry_run": true, - "timeframe": "3m", + "timeframe": "5m", "dataformat_ohlcv": "json", "dry_run_wallet": 12000, "cancel_open_orders_on_exit": true, @@ -62,15 +62,15 @@ "expiration_hours": 1, "train_period_days": 10, "backtest_period_days": 2, - "identifier": "test_rl9", + "identifier": "test_rl10", "feature_parameters": { "include_corr_pairlist": [ "BTC/USDT", "ETH/USDT" ], "include_timeframes": [ - "3m", - "15m" + "15m", + "30m" ], "label_period_candles": 80, "include_shifted_candles": 0, From 57c488a6f172a713a8625352b07480299c24f91c Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 18:35:41 +0300 Subject: [PATCH 025/232] learning_rate + multicpu changes --- .../ReinforcementLearningPPO_multiproc.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 1b2873334..c00784d7a 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -50,19 +50,22 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) + learning_rate = agent_params["learning_rate"] # price data for model training and evaluation price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( len(test_df.index)) - env_id = "CartPole-v1" - num_cpu = 4 + env_id = "train_env" + train_num_cpu = 6 train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + self.CONV_WIDTH) for i in range(train_num_cpu)]) - eval_env = SubprocVecEnv([make_env(env_id, i, 1, test_df, price_test, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + eval_num_cpu = 6 + eval_env_id = 'eval_env' + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + self.CONV_WIDTH) for i in range(eval_num_cpu)]) path = self.dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -71,10 +74,10 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) + net_arch=[512, 512, 512]) model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=learning_rate, gamma=0.9, verbose=1 ) model.learn( @@ -83,6 
+86,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): ) print('Training finished!') + eval_env.close() return model From bf7ceba95857ab6880929c8387d32a84155d92fd Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:01:15 +0200 Subject: [PATCH 026/232] set cpu threads in config --- .gitignore | 1 + config_examples/config_freqai-rl.example.json | 110 ++++++++++++++++++ .../RL/BaseReinforcementLearningModel.py | 19 ++- freqtrade/freqai/data_drawer.py | 7 +- .../prediction_models/CatboostClassifier.py | 2 +- .../prediction_models/CatboostRegressor.py | 2 +- .../ReinforcementLearningPPO.py | 13 ++- .../ReinforcementLearningPPO_multiproc.py | 15 ++- .../ReinforcementLearningTDQN.py | 11 +- 9 files changed, 159 insertions(+), 21 deletions(-) create mode 100644 config_examples/config_freqai-rl.example.json diff --git a/.gitignore b/.gitignore index e400c01f5..2d2d526d9 100644 --- a/.gitignore +++ b/.gitignore @@ -113,3 +113,4 @@ target/ !config_examples/config_full.example.json !config_examples/config_kraken.example.json !config_examples/config_freqai.example.json +!config_examples/config_freqai-rl.example.json diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json new file mode 100644 index 000000000..826fe7187 --- /dev/null +++ b/config_examples/config_freqai-rl.example.json @@ -0,0 +1,110 @@ +{ + "trading_mode": "futures", + "new_pairs_days": 30, + "margin_mode": "isolated", + "max_open_trades": 8, + "stake_currency": "USDT", + "stake_amount": 1000, + "tradable_balance_ratio": 1, + "fiat_display_currency": "USD", + "dry_run": true, + "timeframe": "5m", + "dataformat_ohlcv": "json", + "dry_run_wallet": 12000, + "cancel_open_orders_on_exit": true, + "unfilledtimeout": { + "entry": 10, + "exit": 30 + }, + "exchange": { + "name": "binance", + "key": "", + "secret": "", + "ccxt_config": { + "enableRateLimit": true + }, + "ccxt_async_config": { + "enableRateLimit": true, + "rateLimit": 200 + }, + "pair_whitelist": [ + "1INCH/USDT", + "AAVE/USDT" + ], + "pair_blacklist": [] + }, + "entry_pricing": { + "price_side": "same", + "purge_old_models": true, + "use_order_book": true, + "order_book_top": 1, + "price_last_balance": 0.0, + "check_depth_of_market": { + "enabled": false, + "bids_to_ask_delta": 1 + } + }, + "exit_pricing": { + "price_side": "other", + "use_order_book": true, + "order_book_top": 1 + }, + "pairlists": [ + { + "method": "StaticPairList" + } + ], + "freqai": { + "model_save_type": "stable_baselines_ppo", + "conv_width": 10, + "follow_mode": false, + "purge_old_models": true, + "train_period_days": 10, + "backtest_period_days": 2, + "identifier": "unique-id", + "data_kitchen_thread_count": 4, + "feature_parameters": { + "include_corr_pairlist": [ + "BTC/USDT", + "ETH/USDT" + ], + "include_timeframes": [ + "5m", + "30m" + ], + "label_period_candles": 80, + "include_shifted_candles": 0, + "DI_threshold": 0, + "weight_factor": 0.9, + "principal_component_analysis": false, + "use_SVM_to_remove_outliers": false, + "svm_params": {"shuffle": true, "nu": 0.1}, + "stratify_training_data": 0, + "indicator_max_period_candles": 10, + "indicator_periods_candles": [5] + }, + "data_split_parameters": { + "test_size": 0.5, + "random_state": 1, + "shuffle": false + }, + "model_training_parameters": { + "n_steps": 2048, + "ent_coef": 0.005, + "learning_rate": 0.000025, + "batch_size": 256, + "eval_cycles" : 5, + "train_cycles" : 15 + }, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.01 + } + }, + 
"bot_name": "RL_test", + "force_entry_enable": true, + "initial_state": "running", + "internals": { + "process_throttle_secs": 5 + } +} \ No newline at end of file diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a28b88c42..8fa784f12 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -56,7 +56,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - model = self.fit(data_dictionary, pair) + model = self.fit_rl(data_dictionary, pair, dk) if pair not in self.dd.historic_predictions: self.set_initial_historic_predictions( @@ -69,7 +69,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model @abstractmethod - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): """ Agent customizations and abstract Reinforcement Learning customizations go in here. Abstract method, so this function must be overridden by @@ -164,6 +164,21 @@ class BaseReinforcementLearningModel(IFreqaiModel): for return_str in dk.data['extra_returns_per_train']: hist_preds_df[return_str] = 0 + # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. + # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor + # all the other existing fit() functions to include dk argument. For now we instantiate and + # leave it. + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: + """ + Most regressors use the same function names and arguments e.g. user + can drop in LGBMRegressor in place of CatBoostRegressor and all data + management will be properly handled by Freqai. + :param data_dictionary: Dict = the dictionary constructed by DataHandler to hold + all the training and test data/labels. + """ + + return + class MyRLEnv(Base3ActionRLEnv): diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index f9d56c4b4..68f688ed4 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -471,11 +471,12 @@ class FreqaiDataDrawer: elif model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines': + elif model_type == 'stable_baselines_ppo': from stable_baselines3.ppo.ppo import PPO + model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + elif model_type == 'stable_baselines_dqn': from stable_baselines3 import DQN - #model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") - model = DQN.load(dk.data_path / f"best_model.zip") + model = DQN.load(dk.data_path / f"{dk.model_filename}_model.zip") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/prediction_models/CatboostClassifier.py b/freqtrade/freqai/prediction_models/CatboostClassifier.py index b88b28b25..fad74d7a8 100644 --- a/freqtrade/freqai/prediction_models/CatboostClassifier.py +++ b/freqtrade/freqai/prediction_models/CatboostClassifier.py @@ -16,7 +16,7 @@ class CatboostClassifier(BaseClassifierModel): has its own DataHandler where data is held, saved, loaded, and managed. 
""" - def fit(self, data_dictionary: Dict) -> Any: + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: """ User sets up the training and test data to fit their desired model here :params: diff --git a/freqtrade/freqai/prediction_models/CatboostRegressor.py b/freqtrade/freqai/prediction_models/CatboostRegressor.py index d93569c91..018f55879 100644 --- a/freqtrade/freqai/prediction_models/CatboostRegressor.py +++ b/freqtrade/freqai/prediction_models/CatboostRegressor.py @@ -17,7 +17,7 @@ class CatboostRegressor(BaseRegressionModel): has its own DataHandler where data is held, saved, loaded, and managed. """ - def fit(self, data_dictionary: Dict) -> Any: + def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: """ User sets up the training and test data to fit their desired model here :param data_dictionary: the dictionary constructed by DataHandler to hold diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 5bc33bff1..d1cd2293e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -9,9 +9,9 @@ import torch as th from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor -# from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen logger = logging.getLogger(__name__) @@ -22,7 +22,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -44,7 +44,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): eval_env = Monitor(eval, ".") eval_env.reset() - path = self.dk.data_path + path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -54,7 +54,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): net_arch=[256, 256, 128]) model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, gamma=0.9, verbose=1 + tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, + gamma=0.9, verbose=1 ) model.learn( @@ -62,9 +63,11 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): callback=eval_callback ) + best_model = PPO.load(dk.data_path / "best_model.zip") + print('Training finished!') - return model + return best_model class MyRLEnv(Base3ActionRLEnv): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index c00784d7a..743caf8c6 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -13,7 +13,9 @@ from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen import gym + logger = logging.getLogger(__name__) @@ -42,7 +44,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -58,16 +60,15 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): len(test_df.index)) env_id = "train_env" - train_num_cpu = 6 + num_cpu = int(dk.thread_count / 2) train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, self.CONV_WIDTH) for i in range(train_num_cpu)]) - eval_num_cpu = 6 eval_env_id = 'eval_env' eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, - self.CONV_WIDTH) for i in range(eval_num_cpu)]) + self.CONV_WIDTH) for i in range(num_cpu)]) - path = self.dk.data_path + path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -85,10 +86,12 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): callback=eval_callback ) + # TODO get callback working so the best model is saved. 
For now we save last model + # best_model = PPO.load(dk.data_path / "best_model.zip") print('Training finished!') eval_env.close() - return model + return model # best_model class MyRLEnv(Base3ActionRLEnv): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 2a8570d3e..8bc5f9152 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -7,9 +7,12 @@ from stable_baselines3.common.monitor import Monitor from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN +from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer import numpy as np +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen + logger = logging.getLogger(__name__) @@ -18,7 +21,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit(self, data_dictionary: Dict[str, Any], pair: str = ''): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -40,7 +43,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): eval_env = Monitor(eval, ".") eval_env.reset() - path = self.dk.data_path + path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -63,9 +66,11 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): callback=eval_callback ) + best_model = DQN.load(dk.data_path / "best_model.zip") + print('Training finished!') - return model + return best_model class MyRLEnv(Base3ActionRLEnv): From e5df39e8913721847c0f6ab9754a71cde0cea38b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:08:20 +0200 Subject: [PATCH 027/232] ensuring best_model is placed in ram and saved to disk and loaded from disk --- .../prediction_models/ReinforcementLearningPPO_multiproc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 743caf8c6..8370500b9 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -62,7 +62,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(dk.thread_count / 2) train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, - self.CONV_WIDTH) for i in range(train_num_cpu)]) + self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, From 69d542d3e2a464ae6fbd039489d2187b20705c01 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:27:48 +0200 Subject: [PATCH 028/232] match config and strats to upstream freqai --- config_examples/config_freqai-rl.example.json | 1 + .../ReinforcementLearningExample3ac.py | 74 +++++++++---------- .../ReinforcementLearningExample5ac.py | 74 
+++++++++---------- 3 files changed, 73 insertions(+), 76 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 826fe7187..736f3e022 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -55,6 +55,7 @@ } ], "freqai": { + "enabled": true, "model_save_type": "stable_baselines_ppo", "conv_width": 10, "follow_mode": false, diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 2173f3d2f..1976620fb 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -62,57 +62,55 @@ class ReinforcementLearningExample3ac(IStrategy): coin = pair.split('/')[0] - with self.freqai.lock: - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - informative[f"%-{coin}pct-change"] = informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) - # Add generalized indicators here (because in live, it will call this - # 
function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py - df["&-action"] = 2 + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 return df def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - self.freqai_info = self.config["freqai"] - dataframe = self.freqai.start(dataframe, metadata, self) return dataframe diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index bf0d91390..8c19cc0fa 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -62,57 +62,55 @@ class ReinforcementLearningExample5ac(IStrategy): coin = pair.split('/')[0] - with self.freqai.lock: - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - informative[f"%-{coin}pct-change"] = informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + informative[f"%-{coin}pct-change"] = informative["close"].pct_change() + informative[f"%-{coin}raw_volume"] = informative["volume"] - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - 
informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) + # Raw price currently necessary for RL models: + informative[f"%-{coin}raw_price"] = informative["close"] - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) - # Add generalized indicators here (because in live, it will call this - # function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py - df["&-action"] = 2 + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). 
Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # user adds targets here by prepending them with &- (see convention below) + # If user wishes to use multiple targets, a multioutput prediction model + # needs to be used such as templates/CatboostPredictionMultiModel.py + df["&-action"] = 2 return df def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - self.freqai_info = self.config["freqai"] - dataframe = self.freqai.start(dataframe, metadata, self) return dataframe From dd382dd3702cfe7edf2848adc9f7958d08ac62dc Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 15 Aug 2022 18:56:53 +0200 Subject: [PATCH 029/232] add monitor to eval env so that multiproc can save best_model --- .../ReinforcementLearningPPO_multiproc.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 8370500b9..e8f67cbb8 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -6,6 +6,7 @@ import numpy as np # import pandas as pd import torch as th # from pandas import DataFrame +from stable_baselines3.common.monitor import Monitor from typing import Callable from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback @@ -20,7 +21,7 @@ logger = logging.getLogger(__name__) def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size) -> Callable: + reward_params, window_size, monitor=False) -> Callable: """ Utility function for multiprocessed env. @@ -34,6 +35,8 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, env = MyRLEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params, id=env_id, seed=seed + rank) + if monitor: + env = Monitor(env, ".") return env set_random_seed(seed) return _init @@ -66,7 +69,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): eval_env_id = 'eval_env' eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -86,12 +89,11 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): callback=eval_callback ) - # TODO get callback working so the best model is saved. 
For now we save last model - # best_model = PPO.load(dk.data_path / "best_model.zip") + best_model = PPO.load(dk.data_path / "best_model.zip") print('Training finished!') eval_env.close() - return model # best_model + return best_model class MyRLEnv(Base3ActionRLEnv): From d60a166fbf8a63f7d0107a115d8e00cd190630d4 Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Mon, 15 Aug 2022 22:39:33 +0300 Subject: [PATCH 030/232] multiproc TDQN with xtra callbacks --- .../ReinforcementLearningTDQN_multiproc.py | 164 ++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py new file mode 100644 index 000000000..d05184d87 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -0,0 +1,164 @@ +import logging +from typing import Any, Dict # Optional +import torch as th +import numpy as np +import gym +from typing import Callable +from stable_baselines3.common.callbacks import EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold +from stable_baselines3.common.monitor import Monitor +from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.utils import set_random_seed +from stable_baselines3 import DQN +from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.RL.TDQNagent import TDQN +from stable_baselines3.common.buffers import ReplayBuffer +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen + + +logger = logging.getLogger(__name__) + +def make_env(env_id: str, rank: int, seed: int, train_df, price, + reward_params, window_size, monitor=False) -> Callable: + """ + Utility function for multiprocessed env. + + :param env_id: (str) the environment ID + :param num_env: (int) the number of environment you wish to have in subprocesses + :param seed: (int) the inital seed for RNG + :param rank: (int) index of the subprocess + :return: (Callable) + """ + def _init() -> gym.Env: + + env = MyRLEnv(df=train_df, prices=price, window_size=window_size, + reward_kwargs=reward_params, id=env_id, seed=seed + rank) + if monitor: + env = Monitor(env, ".") + return env + set_random_seed(seed) + return _init + +class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + + agent_params = self.freqai_info['model_training_parameters'] + reward_params = self.freqai_info['model_reward_parameters'] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = agent_params["eval_cycles"] * len(test_df) + total_timesteps = agent_params["train_cycles"] * len(train_df) + learning_rate = agent_params["learning_rate"] + + # price data for model training and evaluation + price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) + price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( + len(test_df.index)) + + env_id = "train_env" + num_cpu = int(dk.thread_count / 2) + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + self.CONV_WIDTH) for i in range(num_cpu)]) + + eval_env_id = 'eval_env' + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) + + path = dk.data_path + stop_train_callback = StopTrainingOnNoModelImprovement(max_no_improvement_evals=5, min_evals=10, verbose=2) + callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=2) + eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), + deterministic=True, render=True, callback_after_eval=stop_train_callback, callback_on_new_best=callback_on_best, verbose=2) + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[512, 512, 512]) + + model = TDQN('TMultiInputPolicy', train_env, + policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/tdqn/tensorboard/", + learning_rate=learning_rate, gamma=0.9, + target_update_interval=5000, buffer_size=50000, + exploration_initial_eps=1, exploration_final_eps=0.1, + replay_buffer_class=ReplayBuffer + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=eval_callback + ) + + best_model = DQN.load(dk.data_path / "best_model.zip") + print('Training finished!') + eval_env.close() + + return best_model + + +class MyRLEnv(Base3ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env + """ + + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if (action == Actions.Short.value or + action == Actions.Neutral.value) and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + # close short + if (action == Actions.Long.value or + action == Actions.Neutral.value) and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + return 0. + +# User can inherit and customize 5 action environment +# class MyRLEnv(Base5ActionRLEnv): +# """ +# User can override any function in BaseRLEnv and gym.Env. Here the user +# Adds 5 actions. +# """ + +# def calculate_reward(self, action): + +# if self._last_trade_tick is None: +# return 0. 
+ +# # close long +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(current_price) - np.log(last_trade_price)) + +# if action == Actions.Long_sell.value and self._position == Positions.Long: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(current_price) - np.log(last_trade_price)) * 2) + +# # close short +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(last_trade_price) - np.log(current_price)) + +# if action == Actions.Short_buy.value and self._position == Positions.Short: +# if self.close_trade_profit[-1] > self.profit_aim * self.rr: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float((np.log(last_trade_price) - np.log(current_price)) * 2) + +# return 0. From 0475b7cb1838f46bcbb31771eca0b3c3cb6ed940 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Tue, 16 Aug 2022 09:30:35 +0700 Subject: [PATCH 031/232] remove unuse code and fix coding conventions --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 7 ------ freqtrade/freqai/RL/Base5ActionRLEnv.py | 14 ----------- .../ReinforcementLearningExample3ac.py | 1 - .../ReinforcementLearningExample5ac.py | 1 - .../ReinforcementLearningPPO_multiproc.py | 5 +++- .../ReinforcementLearningTDQN_multiproc.py | 24 +++++++++++++++---- 6 files changed, 23 insertions(+), 29 deletions(-) diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 5e8bff024..bf7b2fc7b 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -71,10 +71,6 @@ class Base3ActionRLEnv(gym.Env): self.history = None self.trade_history = [] - self.r_t_change = 0. - - self.returns_report = [] - def seed(self, seed: int = 1): self.np_random, seed = seeding.np_random(seed) return [seed] @@ -101,9 +97,6 @@ class Base3ActionRLEnv(gym.Env): self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] - self.r_t_change = 0. - - self.returns_report = [] return self._get_observation() diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 01fb77481..00b031e54 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -73,11 +73,6 @@ class Base5ActionRLEnv(gym.Env): self.history = None self.trade_history = [] - # self.A_t, self.B_t = 0.000639, 0.00001954 - self.r_t_change = 0. - - self.returns_report = [] - def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] @@ -104,9 +99,6 @@ class Base5ActionRLEnv(gym.Env): self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] - self.r_t_change = 0. 
- - self.returns_report = [] return self._get_observation() @@ -178,12 +170,6 @@ class Base5ActionRLEnv(gym.Env): return observation, step_reward, self._done, info - # def processState(self, state): - # return state.to_numpy() - - # def convert_mlp_Policy(self, obs_): - # pass - def _get_observation(self): return self.signal_features[(self._current_tick - self.window_size):self._current_tick] diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index 1976620fb..be7a8973b 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -62,7 +62,6 @@ class ReinforcementLearningExample3ac(IStrategy): coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 8c19cc0fa..0ecea92a9 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -62,7 +62,6 @@ class ReinforcementLearningExample5ac(IStrategy): coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index e8f67cbb8..26099a9e3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -81,7 +81,10 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): net_arch=[512, 512, 512]) model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=learning_rate, gamma=0.9, verbose=1 + tensorboard_log=f"{path}/ppo/tensorboard/", + learning_rate=learning_rate, + gamma=0.9, + verbose=1 ) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index d05184d87..dd34c96c1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -4,7 +4,8 @@ import torch as th import numpy as np import gym from typing import Callable -from stable_baselines3.common.callbacks import EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold +from stable_baselines3.common.callbacks import ( + EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold) from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed @@ -18,6 +19,7 @@ from freqtrade.freqai.data_kitchen import FreqaiDataKitchen logger = logging.getLogger(__name__) + def make_env(env_id: str, rank: int, seed: int, train_df, price, reward_params, window_size, monitor=False) -> Callable: """ @@ -39,6 +41,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, set_random_seed(seed) return _init + class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): """ User created Reinforcement Learning Model prediction model. 
@@ -69,11 +72,22 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path - stop_train_callback = StopTrainingOnNoModelImprovement(max_no_improvement_evals=5, min_evals=10, verbose=2) + stop_train_callback = StopTrainingOnNoModelImprovement( + max_no_improvement_evals=5, + min_evals=10, + verbose=2 + ) callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=2) - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), - deterministic=True, render=True, callback_after_eval=stop_train_callback, callback_on_new_best=callback_on_best, verbose=2) + eval_callback = EvalCallback( + eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", + eval_freq=int(eval_freq), + deterministic=True, + render=True, + callback_after_eval=stop_train_callback, + callback_on_new_best=callback_on_best, + verbose=2 + ) # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 512]) From 16cec7dfbd51f34c479d842ca023c8cd34aa79a7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 16 Aug 2022 12:18:06 +0200 Subject: [PATCH 032/232] fix save/reload functionality for stablebaselines --- .../config_reinforcementlearning_example.json | 110 ------------------ freqtrade/freqai/data_drawer.py | 6 +- 2 files changed, 3 insertions(+), 113 deletions(-) delete mode 100644 config_examples/config_reinforcementlearning_example.json diff --git a/config_examples/config_reinforcementlearning_example.json b/config_examples/config_reinforcementlearning_example.json deleted file mode 100644 index 29f088ef3..000000000 --- a/config_examples/config_reinforcementlearning_example.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "trading_mode": "futures", - "new_pairs_days": 30, - "margin_mode": "isolated", - "max_open_trades": 8, - "stake_currency": "USDT", - "stake_amount": 1000, - "tradable_balance_ratio": 1, - "fiat_display_currency": "USD", - "dry_run": true, - "timeframe": "5m", - "dataformat_ohlcv": "json", - "dry_run_wallet": 12000, - "cancel_open_orders_on_exit": true, - "unfilledtimeout": { - "entry": 10, - "exit": 30 - }, - "exchange": { - "name": "binance", - "key": "", - "secret": "", - "ccxt_config": { - "enableRateLimit": true - }, - "ccxt_async_config": { - "enableRateLimit": true, - "rateLimit": 200 - }, - "pair_whitelist": [ - "1INCH/USDT", - "AAVE/USDT" - ], - "pair_blacklist": [] - }, - "entry_pricing": { - "price_side": "same", - "purge_old_models": true, - "use_order_book": true, - "order_book_top": 1, - "price_last_balance": 0.0, - "check_depth_of_market": { - "enabled": false, - "bids_to_ask_delta": 1 - } - }, - "exit_pricing": { - "price_side": "other", - "use_order_book": true, - "order_book_top": 1 - }, - "pairlists": [ - { - "method": "StaticPairList" - } - ], - "freqai": { - "model_save_type": "stable_baselines", - "conv_width": 10, - "follow_mode": false, - "purge_old_models": true, - "expiration_hours": 1, - "train_period_days": 10, - "backtest_period_days": 2, - "identifier": "test_rl10", - "feature_parameters": { - "include_corr_pairlist": [ - "BTC/USDT", - "ETH/USDT" - ], - "include_timeframes": [ - "15m", - "30m" - ], - "label_period_candles": 80, - "include_shifted_candles": 0, - "DI_threshold": 0, - "weight_factor": 0.9, - "principal_component_analysis": false, - "use_SVM_to_remove_outliers": false, - "svm_params": {"shuffle": true, "nu": 0.1}, - "stratify_training_data": 0, - 
"indicator_max_period_candles": 10, - "indicator_periods_candles": [5] - }, - "data_split_parameters": { - "test_size": 0.5, - "random_state": 1, - "shuffle": false - }, - "model_training_parameters": { - "n_steps": 2048, - "ent_coef": 0.005, - "learning_rate": 0.000025, - "batch_size": 256, - "eval_cycles" : 5, - "train_cycles" : 15 - }, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.01 - } - }, - "bot_name": "RL_test", - "force_entry_enable": true, - "initial_state": "running", - "internals": { - "process_throttle_secs": 5 - } -} \ No newline at end of file diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 68f688ed4..9603fb9ab 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -395,7 +395,7 @@ class FreqaiDataDrawer: dump(model, save_path / f"{dk.model_filename}_model.joblib") elif model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines': + elif 'stable_baselines' in model_type: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -473,10 +473,10 @@ class FreqaiDataDrawer: model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") elif model_type == 'stable_baselines_ppo': from stable_baselines3.ppo.ppo import PPO - model = PPO.load(dk.data_path / f"{dk.model_filename}_model.zip") + model = PPO.load(dk.data_path / f"{dk.model_filename}_model") elif model_type == 'stable_baselines_dqn': from stable_baselines3 import DQN - model = DQN.load(dk.data_path / f"{dk.model_filename}_model.zip") + model = DQN.load(dk.data_path / f"{dk.model_filename}_model") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") From 2080ff86ed77b1b4c430d9a9b2f9cbf7ffc08a8a Mon Sep 17 00:00:00 2001 From: MukavaValkku Date: Wed, 17 Aug 2022 08:36:10 +0300 Subject: [PATCH 033/232] 5ac base fixes in logic --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 129 +++++++++++++----------- 1 file changed, 68 insertions(+), 61 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 00b031e54..574e71857 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -26,23 +26,23 @@ class Positions(Enum): def opposite(self): return Positions.Short if self == Positions.Long else Positions.Long - def mean_over_std(x): std = np.std(x, ddof=1) mean = np.mean(x) return mean / std if std > 0 else 0 - class Base5ActionRLEnv(gym.Env): """ Base class for a 5 action environment """ metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, ): + def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 - self.seed() + self.id = id + self.seed(seed) self.df = df self.signal_features = self.df self.prices = prices @@ -73,7 +73,7 @@ class Base5ActionRLEnv(gym.Env): self.history = None self.trade_history = [] - def seed(self, seed=None): + def seed(self, seed: int = 1): self.np_random, seed = seeding.np_random(seed) return [seed] @@ -102,7 +102,7 @@ class Base5ActionRLEnv(gym.Env): return self._get_observation() - def step(self, action): + def step(self, action: int): self._done = False self._current_tick += 1 @@ -191,7 +191,7 @@ class Base5ActionRLEnv(gym.Env): else: return 0. 
- def is_tradesignal(self, action): + def is_tradesignal(self, action: int): # trade signal """ not trade signal is : @@ -200,29 +200,29 @@ class Base5ActionRLEnv(gym.Env): Action: Short, position: Short -> Hold Short """ return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or (action == Actions.Short_buy.value and self._position == Positions.Long) or + (action == Actions.Short_sell.value and self._position == Positions.Short) or (action == Actions.Short_sell.value and self._position == Positions.Long) or - + (action == Actions.Short_sell.value and self._position == Positions.Neutral) or (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Short)) + (action == Actions.Long_sell.value and self._position == Positions.Long) or + (action == Actions.Long_sell.value and self._position == Positions.Short) or + (action == Actions.Long_sell.value and self._position == Positions.Neutral)) def _is_trade(self, action: Actions): - return ((action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - - (action == Actions.Neutral.Short_sell and self._position == Positions.Long) or - (action == Actions.Neutral.Long_sell and self._position == Positions.Short) - ) + return ((action == Actions.Long_buy.value and self._position == Positions.Neutral) or + (action == Actions.Short_buy.value and self._position == Positions.Neutral)) def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) + return ((action == Actions.Short_buy.value and self._position == Positions.Short) or + (action == Actions.Long_buy.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Neutral)) def add_buy_fee(self, price): return price * (1 + self.fee) @@ -240,6 +240,52 @@ class Base5ActionRLEnv(gym.Env): def get_sharpe_ratio(self): return mean_over_std(self.get_portfolio_log_returns()) + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. 
+ + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + if len(self.close_trade_profit): + # aim x2 rw + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + # less than aim x1 rw + elif self.close_trade_profit[-1] < self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + # # less than RR SL x2 neg rw + # elif self.close_trade_profit[-1] < (self.profit_aim * -1): + # last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 + + + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + if len(self.close_trade_profit): + # aim x2 rw + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + # less than aim x1 rw + elif self.close_trade_profit[-1] < self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + # # less than RR SL x2 neg rw + # elif self.close_trade_profit[-1] > self.profit_aim * self.rr: + # last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + # return float((np.log(last_trade_price) - np.log(current_price)) * 2) * -1 + return 0. + + def _update_profit(self, action): # if self._is_trade(action) or self._done: if self._is_trade(action) or self._done: @@ -255,7 +301,7 @@ class Base5ActionRLEnv(gym.Env): self._profits.append((self._current_tick, self._total_profit)) self.close_trade_profit.append(pnl) - def most_recent_return(self, action): + def most_recent_return(self, action: int): """ We support Long, Neutral and Short positions. 
Return is generated from rising prices in Long @@ -265,7 +311,6 @@ class Base5ActionRLEnv(gym.Env): # Long positions if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open - # if action == Actions.Short.value or action == Actions.Neutral.value: if action == Actions.Short_buy.value or action == Actions.Neutral.value: current_price = self.add_sell_fee(current_price) @@ -280,7 +325,6 @@ class Base5ActionRLEnv(gym.Env): # Short positions if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open - # if action == Actions.Long.value or action == Actions.Neutral.value: if action == Actions.Long_buy.value or action == Actions.Neutral.value: current_price = self.add_buy_fee(current_price) @@ -296,9 +340,6 @@ class Base5ActionRLEnv(gym.Env): def get_portfolio_log_returns(self): return self.portfolio_log_returns[1:self._current_tick + 1] - def get_trading_log_return(self): - return self.portfolio_log_returns[self._start_tick:] - def update_portfolio_log_returns(self, action): self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) @@ -314,37 +355,3 @@ class Base5ActionRLEnv(gym.Env): returns = np.array(self.close_trade_profit) reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) return reward - - def get_bnh_log_return(self): - return np.diff(np.log(self.prices['open'][self._start_tick:])) - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. 
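For reference, the reward shaping consolidated into calculate_reward() in the patch above reduces to the following minimal sketch. It is not part of the patch: the helper name close_long_reward is illustrative, the sell-side fee is assumed to mirror add_buy_fee by subtracting the fee, and the boundary case (profit exactly at the target) is folded into the plain reward for brevity. Closing a short mirrors this with the entry/exit log terms swapped.

    import numpy as np

    def close_long_reward(entry_open: float, exit_open: float, fee: float,
                          closed_profit: float, profit_aim: float, rr: float) -> float:
        # entry pays the buy-side fee, exit pays the sell-side fee
        entry_price = entry_open * (1 + fee)
        exit_price = exit_open * (1 - fee)
        log_return = float(np.log(exit_price) - np.log(entry_price))
        # trades that beat the profit target (profit_aim * risk/reward) earn a doubled reward
        if closed_profit > profit_aim * rr:
            return 2 * log_return
        return log_return

    # e.g. a long opened at 100.0 and closed at 103.0 with 0.15% fees, aiming for 1% at rr=1
    reward = close_long_reward(100.0, 103.0, 0.0015, 0.03, 0.01, 1.0)
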
From b90da46b1b0889bea477e65edf58d1375d2a352f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 17 Aug 2022 12:51:14 +0200 Subject: [PATCH 034/232] improve price df handling to enable backtesting --- config_examples/config_freqai-rl.example.json | 7 +--- .../RL/BaseReinforcementLearningModel.py | 39 +++++++++++++++++-- .../ReinforcementLearningExample3ac.py | 15 ++++--- .../ReinforcementLearningExample5ac.py | 12 +++--- .../ReinforcementLearningPPO.py | 18 +++------ .../ReinforcementLearningPPO_multiproc.py | 16 ++++---- .../ReinforcementLearningTDQN.py | 15 +++---- .../ReinforcementLearningTDQN_multiproc.py | 14 +++---- 8 files changed, 77 insertions(+), 59 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 736f3e022..565eeda00 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -73,16 +73,12 @@ "5m", "30m" ], - "label_period_candles": 80, "include_shifted_candles": 0, - "DI_threshold": 0, "weight_factor": 0.9, "principal_component_analysis": false, "use_SVM_to_remove_outliers": false, - "svm_params": {"shuffle": true, "nu": 0.1}, - "stratify_training_data": 0, "indicator_max_period_candles": 10, - "indicator_periods_candles": [5] + "indicator_periods_candles": [5, 10] }, "data_split_parameters": { "test_size": 0.5, @@ -90,7 +86,6 @@ "shuffle": false }, "model_training_parameters": { - "n_steps": 2048, "ent_coef": 0.005, "learning_rate": 0.000025, "batch_size": 256, diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 8fa784f12..78feea6d1 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -10,8 +10,11 @@ from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade - +import torch.multiprocessing +import torch as th logger = logging.getLogger(__name__) +th.set_num_threads(8) +torch.multiprocessing.set_sharing_strategy('file_system') class BaseReinforcementLearningModel(IFreqaiModel): @@ -46,6 +49,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk.fit_labels() # useless for now, but just satiating append methods # normalize all data based on train_dataset only + prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk) data_dictionary = dk.normalize_data(data_dictionary) # optional additional data cleaning/analysis @@ -56,7 +60,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - model = self.fit_rl(data_dictionary, pair, dk) + model = self.fit_rl(data_dictionary, pair, dk, prices_train, prices_test) if pair not in self.dd.historic_predictions: self.set_initial_historic_predictions( @@ -69,7 +73,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model @abstractmethod - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): """ Agent customizations and abstract Reinforcement Learning customizations go in here. 
Abstract method, so this function must be overridden by @@ -141,6 +146,34 @@ class BaseReinforcementLearningModel(IFreqaiModel): return output + def build_ohlc_price_dataframes(self, data_dictionary: dict, + pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame, + DataFrame]: + """ + Builds the train prices and test prices for the environment. + """ + + coin = pair.split('/')[0] + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # price data for model training and evaluation + tf = self.config['timeframe'] + ohlc_list = [f'%-{coin}raw_open_{tf}', f'%-{coin}raw_low_{tf}', + f'%-{coin}raw_high_{tf}', f'%-{coin}raw_close_{tf}'] + rename_dict = {f'%-{coin}raw_open_{tf}': 'open', f'%-{coin}raw_low_{tf}': 'low', + f'%-{coin}raw_high_{tf}': ' high', f'%-{coin}raw_close_{tf}': 'close'} + + prices_train = train_df.filter(ohlc_list, axis=1) + prices_train.rename(columns=rename_dict, inplace=True) + prices_train.reset_index(drop=True) + + prices_test = test_df.filter(ohlc_list, axis=1) + prices_test.rename(columns=rename_dict, inplace=True) + prices_test.reset_index(drop=True) + + return prices_train, prices_test + def set_initial_historic_predictions( self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str ) -> None: diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py index be7a8973b..ec0977455 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py @@ -36,7 +36,7 @@ class ReinforcementLearningExample3ac(IStrategy): stoploss = -0.05 use_exit_signal = True startup_candle_count: int = 300 - can_short = False + can_short = True linear_roi_offset = DecimalParameter( 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True @@ -76,8 +76,11 @@ class ReinforcementLearningExample3ac(IStrategy): informative[f"%-{coin}pct-change"] = informative["close"].pct_change() informative[f"%-{coin}raw_volume"] = informative["volume"] - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data @@ -101,9 +104,9 @@ class ReinforcementLearningExample3ac(IStrategy): df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py + # For RL, this is not a target, it is simply a filler until actions come out + # of the model. 
+ # for Base3ActionEnv, 2 is netural (hold) df["&-action"] = 2 return df diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 0ecea92a9..70727f6db 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -76,8 +76,11 @@ class ReinforcementLearningExample5ac(IStrategy): informative[f"%-{coin}pct-change"] = informative["close"].pct_change() informative[f"%-{coin}raw_volume"] = informative["volume"] - # Raw price currently necessary for RL models: - informative[f"%-{coin}raw_price"] = informative["close"] + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data @@ -101,9 +104,8 @@ class ReinforcementLearningExample5ac(IStrategy): df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - # user adds targets here by prepending them with &- (see convention below) - # If user wishes to use multiple targets, a multioutput prediction model - # needs to be used such as templates/CatboostPredictionMultiModel.py + # For RL, there are no direct targets to set. This is filler (neutral) + # until the agent sends an action. df["&-action"] = 2 return df diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index d1cd2293e..b437ea8aa 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -3,9 +3,8 @@ from typing import Any, Dict # , Tuple import numpy as np # import numpy.typing as npt -# import pandas as pd import torch as th -# from pandas import DataFrame +from pandas import DataFrame from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor @@ -22,7 +21,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -31,18 +31,12 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - # environments - train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval = MyRLEnv(df=test_df, prices=price_test, + eval = MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) eval_env = Monitor(eval, ".") - eval_env.reset() path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -63,7 +57,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): callback=eval_callback ) - best_model = PPO.load(dk.data_path / "best_model.zip") + best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 26099a9e3..b1c5f316f 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -16,6 +16,7 @@ from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Posi from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.data_kitchen import FreqaiDataKitchen import gym +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -47,7 +48,8 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -57,18 +59,14 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): total_timesteps = agent_params["train_cycles"] * len(train_df) learning_rate = agent_params["learning_rate"] - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - env_id = "train_env" + th.set_num_threads(dk.thread_count) num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path @@ -92,7 +90,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): callback=eval_callback ) - best_model = PPO.load(dk.data_path / "best_model.zip") + best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') eval_env.close() diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 8bc5f9152..a60bc1fa1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -10,6 +10,7 @@ from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer import numpy as np +from pandas import DataFrame from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -21,7 +22,8 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -30,15 +32,10 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): eval_freq = agent_params["eval_cycles"] * len(test_df) total_timesteps = agent_params["train_cycles"] * len(train_df) - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - # environments - train_env = MyRLEnv(df=train_df, prices=price, window_size=self.CONV_WIDTH, + train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) - eval = MyRLEnv(df=test_df, prices=price_test, + eval = MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=reward_params) eval_env = Monitor(eval, ".") eval_env.reset() @@ -66,7 +63,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): callback=eval_callback ) - best_model = DQN.load(dk.data_path / "best_model.zip") + best_model = DQN.load(dk.data_path / "best_model") print('Training finished!') diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index dd34c96c1..51e3c07c4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -15,7 +15,7 @@ from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcement from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer from freqtrade.freqai.data_kitchen import FreqaiDataKitchen - +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -47,7 +47,8 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen): + def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, + prices_train: DataFrame, prices_test: DataFrame): agent_params = self.freqai_info['model_training_parameters'] reward_params = self.freqai_info['model_reward_parameters'] @@ -57,18 +58,13 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): total_timesteps = agent_params["train_cycles"] * len(train_df) learning_rate = agent_params["learning_rate"] - # price data for model training and evaluation - price = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail(len(train_df.index)) - price_test = self.dd.historic_data[pair][f"{self.config['timeframe']}"].tail( - len(test_df.index)) - env_id = "train_env" num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, price, reward_params, + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, price_test, reward_params, + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) path = dk.data_path From 74e4fd0633ac288cb86eb6b847f0214a9f774dc6 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 17 Aug 2022 12:58:29 +0200 Subject: [PATCH 035/232] ensure config example can work with backtesting RL --- config_examples/config_freqai-rl.example.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 565eeda00..053c1a08e 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -8,7 +8,7 @@ "tradable_balance_ratio": 1, "fiat_display_currency": "USD", "dry_run": true, - "timeframe": "5m", + "timeframe": "3m", "dataformat_ohlcv": "json", "dry_run_wallet": 12000, "cancel_open_orders_on_exit": true, @@ -56,6 +56,7 @@ ], "freqai": { "enabled": true, + "startup_candles": 1000, "model_save_type": "stable_baselines_ppo", "conv_width": 10, "follow_mode": false, @@ -70,8 +71,8 @@ "ETH/USDT" ], "include_timeframes": [ - "5m", - "30m" + "3m", + "15m" ], "include_shifted_candles": 0, "weight_factor": 0.9, From d55092ff178600d6882bfe5b3149a48bc26856de Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Wed, 17 Aug 2022 17:31:27 +0200 Subject: [PATCH 036/232] Docker building update, and TDQN repair with the newer release of SB+ --- Dockerfile | 2 +- requirements-freqai.txt | 2 +- requirements-hyperopt.txt | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 14a67edc8..d06b53202 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,7 +33,7 @@ RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* ENV LD_LIBRARY_PATH /usr/local/lib # Install dependencies -COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt requirements-freqai.txt /freqtrade/ USER ftuser RUN pip install --user --no-cache-dir numpy \ && pip install --user --no-cache-dir -r requirements-hyperopt.txt diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 869606365..6000f8e0f 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -7,6 +7,6 @@ joblib==1.1.0 
catboost==1.0.6; platform_machine != 'aarch64' lightgbm==3.3.2 torch==1.12.1 -stable-baselines3==1.5.0 +stable-baselines3==1.6.0 gym==0.21.0 tensorboard==2.9.1 \ No newline at end of file diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index 020ccdda8..e19eb27c1 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -7,3 +7,4 @@ scikit-learn==1.1.2 scikit-optimize==0.9.0 filelock==3.8.0 progressbar2==4.0.0 +-r requirements-freqai.txt \ No newline at end of file From 45218faeb0c91f07c40e20071278bee2b865f084 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Thu, 18 Aug 2022 17:01:04 +0700 Subject: [PATCH 037/232] fix coding convention --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 46 +++++++++++++++++-------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 574e71857..5f817f14e 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -26,11 +26,13 @@ class Positions(Enum): def opposite(self): return Positions.Short if self == Positions.Long else Positions.Long + def mean_over_std(x): std = np.std(x, ddof=1) mean = np.mean(x) return mean / std if std > 0 else 0 + class Base5ActionRLEnv(gym.Env): """ Base class for a 5 action environment @@ -250,42 +252,58 @@ class Base5ActionRLEnv(gym.Env): if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee( + self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee( + self.prices.iloc[self._current_tick].open) return float((np.log(current_price) - np.log(last_trade_price)) * 2) # less than aim x1 rw elif self.close_trade_profit[-1] < self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_buy_fee( + self.prices.iloc[self._last_trade_tick].open + ) + current_price = self.add_sell_fee( + self.prices.iloc[self._current_tick].open + ) return float(np.log(current_price) - np.log(last_trade_price)) # # less than RR SL x2 neg rw # elif self.close_trade_profit[-1] < (self.profit_aim * -1): - # last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + # last_trade_price = self.add_buy_fee( + # self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_sell_fee( + # self.prices.iloc[self._current_tick].open) # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 - # close short if action == Actions.Short_buy.value and self._position == Positions.Short: if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee( + self.prices.iloc[self._last_trade_tick].open + ) + current_price = self.add_buy_fee( + self.prices.iloc[self._current_tick].open + ) return float((np.log(last_trade_price) - np.log(current_price)) * 2) # less than aim x1 rw elif self.close_trade_profit[-1] < 
self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_sell_fee( + self.prices.iloc[self._last_trade_tick].open + ) + current_price = self.add_buy_fee( + self.prices.iloc[self._current_tick].open + ) return float(np.log(last_trade_price) - np.log(current_price)) # # less than RR SL x2 neg rw # elif self.close_trade_profit[-1] > self.profit_aim * self.rr: - # last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + # last_trade_price = self.add_sell_fee( + # self.prices.iloc[self._last_trade_tick].open) + # current_price = self.add_buy_fee( + # self.prices.iloc[self._current_tick].open) # return float((np.log(last_trade_price) - np.log(current_price)) * 2) * -1 return 0. - def _update_profit(self, action): # if self._is_trade(action) or self._done: if self._is_trade(action) or self._done: From 81b5aa66e847453989f20418829bc4c52b5b6c4c Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Thu, 18 Aug 2022 17:37:26 +0700 Subject: [PATCH 038/232] make env keep current position when low profit --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 28 ++++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index bf7b2fc7b..9bb4cc39f 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -127,17 +127,25 @@ class Base3ActionRLEnv(gym.Env): Action: Short, position: Long -> Close Long and Open Short """ - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" + u_pnl = self.get_unrealized_profit() + # keep current position if upnl from -0.4% to 0.4% + if u_pnl <= 0.004 and u_pnl >= -0.004 and self._position != Positions.Neutral: + if action == Actions.Long.value and self._position == Positions.Short: + self._position = Positions.Short + elif action == Actions.Short.value and self._position == Positions.Long: + self._position = Positions.Long else: - print("case not defined") + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" + else: + print("case not defined") # Update last trade tick self._last_trade_tick = self._current_tick From 7962a1439be4ead6d21089cfaa385380b88e2752 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Thu, 18 Aug 2022 17:53:52 +0700 Subject: [PATCH 039/232] remove keep low profit --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 28 +++++++++---------------- 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 9bb4cc39f..bf7b2fc7b 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -127,25 +127,17 @@ class Base3ActionRLEnv(gym.Env): Action: Short, position: Long -> Close Long and Open Short """ - u_pnl = self.get_unrealized_profit() - # keep current position if upnl from 
-0.4% to 0.4% - if u_pnl <= 0.004 and u_pnl >= -0.004 and self._position != Positions.Neutral: - if action == Actions.Long.value and self._position == Positions.Short: - self._position = Positions.Short - elif action == Actions.Short.value and self._position == Positions.Long: - self._position = Positions.Long + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + elif action == Actions.Long.value: + self._position = Positions.Long + trade_type = "long" + elif action == Actions.Short.value: + self._position = Positions.Short + trade_type = "short" else: - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") + print("case not defined") # Update last trade tick self._last_trade_tick = self._current_tick From 5d4e5e69fe44aa9dedb9dcfdf43adfe240d9832b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 18 Aug 2022 13:02:47 +0200 Subject: [PATCH 040/232] reinforce training with state info, reinforce prediction with state info, restructure config to accommodate all parameters from any user imported model type. Set 5Act to default env on TDQN. Clean example config. --- config_examples/config_freqai-rl.example.json | 39 ++++----- freqtrade/freqai/RL/Base3ActionRLEnv.py | 4 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 17 +++- .../RL/BaseReinforcementLearningModel.py | 44 +++------- .../ReinforcementLearningPPO.py | 12 ++- .../ReinforcementLearningPPO_multiproc.py | 21 ++--- .../ReinforcementLearningTDQN.py | 83 ++++++------------ .../ReinforcementLearningTDQN_multiproc.py | 86 ++++++------------- 8 files changed, 114 insertions(+), 192 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 053c1a08e..1f12cbc6c 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -8,7 +8,7 @@ "tradable_balance_ratio": 1, "fiat_display_currency": "USD", "dry_run": true, - "timeframe": "3m", + "timeframe": "5m", "dataformat_ohlcv": "json", "dry_run_wallet": 12000, "cancel_open_orders_on_exit": true, @@ -35,7 +35,6 @@ }, "entry_pricing": { "price_side": "same", - "purge_old_models": true, "use_order_book": true, "order_book_top": 1, "price_last_balance": 0.0, @@ -56,10 +55,8 @@ ], "freqai": { "enabled": true, - "startup_candles": 1000, - "model_save_type": "stable_baselines_ppo", + "model_save_type": "stable_baselines_dqn", "conv_width": 10, - "follow_mode": false, "purge_old_models": true, "train_period_days": 10, "backtest_period_days": 2, @@ -71,13 +68,9 @@ "ETH/USDT" ], "include_timeframes": [ - "3m", - "15m" + "5m", + "30m" ], - "include_shifted_candles": 0, - "weight_factor": 0.9, - "principal_component_analysis": false, - "use_SVM_to_remove_outliers": false, "indicator_max_period_candles": 10, "indicator_periods_candles": [5, 10] }, @@ -86,16 +79,22 @@ "random_state": 1, "shuffle": false }, - "model_training_parameters": { - "ent_coef": 0.005, - "learning_rate": 0.000025, - "batch_size": 256, - "eval_cycles" : 5, - "train_cycles" : 15 + "model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "target_update_interval": 5000, + "buffer_size": 50000, + "exploration_initial_eps":1, + "exploration_final_eps": 0.1, + "verbose": 1 }, - 
"model_reward_parameters": { - "rr": 1, - "profit_aim": 0.01 + "rl_config": { + "train_cycles": 15, + "eval_cycles": 5, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02 + } } }, "bot_name": "RL_test", diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index bf7b2fc7b..9d17b982d 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -6,6 +6,7 @@ import gym import numpy as np from gym import spaces from gym.utils import seeding +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -35,7 +36,8 @@ class Base3ActionRLEnv(gym.Env): metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 5f817f14e..d7ceb5ff3 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -6,6 +6,7 @@ import gym import numpy as np from gym import spaces from gym.utils import seeding +from pandas import DataFrame logger = logging.getLogger(__name__) @@ -39,7 +40,8 @@ class Base5ActionRLEnv(gym.Env): """ metadata = {'render.modes': ['human']} - def __init__(self, df, prices, reward_kwargs, window_size=10, starting_point=True, + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1): assert df.ndim == 2 @@ -56,7 +58,7 @@ class Base5ActionRLEnv(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1]) + self.shape = (window_size, self.signal_features.shape[1] + 2) self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) @@ -161,19 +163,26 @@ class Base5ActionRLEnv(gym.Env): self._done = True self._position_history.append(self._position) - observation = self._get_observation() + info = dict( tick=self._current_tick, total_reward=self.total_reward, total_profit=self._total_profit, position=self._position.value ) + + observation = self._get_observation() + self._update_history(info) return observation, step_reward, self._done, info def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + features_and_state = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + return features_and_state def get_unrealized_profit(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 78feea6d1..395b2a1a6 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -13,7 +13,7 @@ from freqtrade.persistence import Trade import torch.multiprocessing import torch as th logger = logging.getLogger(__name__) -th.set_num_threads(8) + torch.multiprocessing.set_sharing_strategy('file_system') @@ -22,6 +22,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): User created Reinforcement Learning 
Model prediction model. """ + def __init__(self, **kwargs): + super().__init__(config=kwargs['config']) + th.set_num_threads(self.freqai_info.get('data_kitchen_thread_count', 4)) + self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] + def train( self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen ) -> Any: @@ -62,12 +67,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): model = self.fit_rl(data_dictionary, pair, dk, prices_train, prices_test) - if pair not in self.dd.historic_predictions: - self.set_initial_historic_predictions( - data_dictionary['train_features'], model, dk, pair) - - self.dd.save_historic_predictions_to_disk() - logger.info(f"--------------------done training {pair}--------------------") return model @@ -127,7 +126,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): # optional additional data cleaning/analysis self.data_cleaning_predict(dk, filtered_dataframe) - pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) + pred_df = self.rl_model_predict( + dk.data_dictionary["prediction_features"], dk, self.model) pred_df.fillna(0, inplace=True) return (pred_df, dk.do_predict) @@ -135,10 +135,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): def rl_model_predict(self, dataframe: DataFrame, dk: FreqaiDataKitchen, model: Any) -> DataFrame: - output = pd.DataFrame(np.full((len(dataframe), 1), 2), columns=dk.label_list) + output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): + market_side, current_profit, total_profit = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] + observations['current_profit'] = current_profit + observations['position'] = market_side res, _ = model.predict(observations, deterministic=True) return res @@ -174,29 +177,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return prices_train, prices_test - def set_initial_historic_predictions( - self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str - ) -> None: - - pred_df = self.rl_model_predict(df, dk, model) - pred_df.fillna(0, inplace=True) - self.dd.historic_predictions[pair] = pred_df - hist_preds_df = self.dd.historic_predictions[pair] - - for label in hist_preds_df.columns: - if hist_preds_df[label].dtype == object: - continue - hist_preds_df[f'{label}_mean'] = 0 - hist_preds_df[f'{label}_std'] = 0 - - hist_preds_df['do_predict'] = 0 - - if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0: - hist_preds_df['DI_values'] = 0 - - for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 - # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor # all the other existing fit() functions to include dk argument. 
For now we instantiate and diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index b437ea8aa..5dc7735d3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -24,18 +24,16 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) # environments train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=reward_params) + reward_kwargs=self.reward_params) eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) eval_env = Monitor(eval, ".") path = dk.data_path @@ -49,7 +47,7 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, - gamma=0.9, verbose=1 + **self.freqai_info['model_training_parameters'] ) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index b1c5f316f..337e94607 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -51,23 +51,20 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params.get("eval_cycles", 4) * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) - learning_rate = agent_params["learning_rate"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) env_id = "train_env" - th.set_num_threads(dk.thread_count) num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, - self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for 
i in + range(num_cpu)]) path = dk.data_path eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", @@ -80,9 +77,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/ppo/tensorboard/", - learning_rate=learning_rate, - gamma=0.9, - verbose=1 + **self.freqai_info['model_training_parameters'] ) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index a60bc1fa1..3a57142cf 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -3,8 +3,7 @@ from typing import Any, Dict # Optional import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor -# from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3 import DQN @@ -25,18 +24,16 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) # environments train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=reward_params) + reward_kwargs=self.reward_params) eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=reward_params) + window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) eval_env = Monitor(eval, ".") eval_env.reset() @@ -50,12 +47,10 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): net_arch=[256, 256, 128]) model = TDQN('TMultiInputPolicy', train_env, - policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/tdqn/tensorboard/", - learning_rate=0.00025, gamma=0.9, - target_update_interval=5000, buffer_size=50000, - exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=ReplayBuffer + policy_kwargs=policy_kwargs, + replay_buffer_class=ReplayBuffer, + **self.freqai_info['model_training_parameters'] ) model.learn( @@ -70,9 +65,11 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): return best_model -class MyRLEnv(Base3ActionRLEnv): +# User can inherit and customize 5 action environment +class MyRLEnv(Base5ActionRLEnv): """ - User can override any function in BaseRLEnv and gym.Env + User can override any function in BaseRLEnv and gym.Env. Here the user + Adds 5 actions. """ def calculate_reward(self, action): @@ -81,55 +78,27 @@ class MyRLEnv(Base3ActionRLEnv): return 0. 
# close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: + if action == Actions.Long_sell.value and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float(np.log(current_price) - np.log(last_trade_price)) + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: + if action == Actions.Short_buy.value and self._position == Positions.Short: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) return float(np.log(last_trade_price) - np.log(current_price)) + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + return 0. - -# User can inherit and customize 5 action environment -# class MyRLEnv(Base5ActionRLEnv): -# """ -# User can override any function in BaseRLEnv and gym.Env. Here the user -# Adds 5 actions. -# """ - -# def calculate_reward(self, action): - -# if self._last_trade_tick is None: -# return 0. - -# # close long -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(current_price) - np.log(last_trade_price)) - -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(current_price) - np.log(last_trade_price)) * 2) - -# # close short -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(last_trade_price) - np.log(current_price)) - -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(last_trade_price) - np.log(current_price)) * 2) - -# return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index 51e3c07c4..bf9e03b7f 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -10,7 +10,7 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed from stable_baselines3 import DQN -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer @@ -50,22 +50,20 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, prices_train: DataFrame, prices_test: DataFrame): - agent_params = self.freqai_info['model_training_parameters'] - reward_params = self.freqai_info['model_reward_parameters'] train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = agent_params["eval_cycles"] * len(test_df) - total_timesteps = agent_params["train_cycles"] * len(train_df) - learning_rate = agent_params["learning_rate"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) env_id = "train_env" num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, reward_params, - self.CONV_WIDTH) for i in range(num_cpu)]) + train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, reward_params, - self.CONV_WIDTH, monitor=True) for i in range(num_cpu)]) + eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i in + range(num_cpu)]) path = dk.data_path stop_train_callback = StopTrainingOnNoModelImprovement( @@ -91,10 +89,8 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): model = TDQN('TMultiInputPolicy', train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/tdqn/tensorboard/", - learning_rate=learning_rate, gamma=0.9, - target_update_interval=5000, buffer_size=50000, - exploration_initial_eps=1, exploration_final_eps=0.1, - replay_buffer_class=ReplayBuffer + replay_buffer_class=ReplayBuffer, + **self.freqai_info['model_training_parameters'] ) model.learn( @@ -109,9 +105,11 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): return best_model -class MyRLEnv(Base3ActionRLEnv): +# User can inherit and customize 5 action environment +class MyRLEnv(Base5ActionRLEnv): """ - User can override any function in BaseRLEnv and gym.Env + User can override any function in BaseRLEnv and gym.Env. Here the user + Adds 5 actions. """ def calculate_reward(self, action): @@ -120,55 +118,27 @@ class MyRLEnv(Base3ActionRLEnv): return 0. 
# close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: + if action == Actions.Long_sell.value and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float(np.log(current_price) - np.log(last_trade_price)) + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: + if action == Actions.Short_buy.value and self._position == Positions.Short: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) return float(np.log(last_trade_price) - np.log(current_price)) + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + return 0. - -# User can inherit and customize 5 action environment -# class MyRLEnv(Base5ActionRLEnv): -# """ -# User can override any function in BaseRLEnv and gym.Env. Here the user -# Adds 5 actions. -# """ - -# def calculate_reward(self, action): - -# if self._last_trade_tick is None: -# return 0. - -# # close long -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(current_price) - np.log(last_trade_price)) - -# if action == Actions.Long_sell.value and self._position == Positions.Long: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(current_price) - np.log(last_trade_price)) * 2) - -# # close short -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(last_trade_price) - np.log(current_price)) - -# if action == Actions.Short_buy.value and self._position == Positions.Short: -# if self.close_trade_profit[-1] > self.profit_aim * self.rr: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float((np.log(last_trade_price) - np.log(current_price)) * 2) - -# return 0. From f95602f6bd3a9e9ef6d2e83921828e33be2d9b91 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 18 Aug 2022 16:07:19 +0200 Subject: [PATCH 041/232] persist a single training environment. 
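(Illustrative sketch, not part of this commit.) The pattern introduced here keeps one environment object alive on the model and, on later retrainings, only re-seeds it with fresh data through the new reset_env() instead of rebuilding it. The class below is a hypothetical stand-in for BaseReinforcementLearningModel:

# Hypothetical stand-in showing the persist-and-reset pattern added in this patch.
class PersistentEnvTrainer:
    def __init__(self):
        self.train_env = None          # persisted across retrainings

    def set_train_env(self, env_cls, train_df, prices, window_size, reward_params):
        if not self.train_env:
            # first training: construct the environment once
            self.train_env = env_cls(df=train_df, prices=prices,
                                     window_size=window_size,
                                     reward_kwargs=reward_params)
        else:
            # later trainings: swap in the new data and reset internal state
            self.train_env.reset_env(train_df, prices, window_size, reward_params)
            self.train_env.reset()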
--- config_examples/config_freqai-rl.example.json | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 12 +- .../RL/BaseReinforcementLearningModel.py | 117 +++++++++--------- .../ReinforcementLearningPPO_multiproc.py | 49 +++++--- .../ReinforcementLearningTDQN.py | 41 +++--- .../ReinforcementLearningTDQN_multiproc.py | 70 ++++++----- 6 files changed, 162 insertions(+), 129 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 1f12cbc6c..ccc977705 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -61,7 +61,7 @@ "train_period_days": 10, "backtest_period_days": 2, "identifier": "unique-id", - "data_kitchen_thread_count": 4, + "data_kitchen_thread_count": 2, "feature_parameters": { "include_corr_pairlist": [ "BTC/USDT", diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index d7ceb5ff3..bf3f0df33 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -7,7 +7,7 @@ import numpy as np from gym import spaces from gym.utils import seeding from pandas import DataFrame - +import pandas as pd logger = logging.getLogger(__name__) @@ -47,6 +47,9 @@ class Base5ActionRLEnv(gym.Env): self.id = id self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + + def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): self.df = df self.signal_features = self.df self.prices = prices @@ -178,10 +181,15 @@ class Base5ActionRLEnv(gym.Env): return observation, step_reward, self._done, info def _get_observation(self): - features_and_state = self.signal_features[( + features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] + features_and_state = DataFrame(np.zeros((len(features_window), 2)), + columns=['current_profit_pct', 'position'], + index=features_window.index) + features_and_state['current_profit_pct'] = self.get_unrealized_profit() features_and_state['position'] = self._position.value + features_and_state = pd.concat([features_window, features_and_state], axis=1) return features_and_state def get_unrealized_profit(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 395b2a1a6..9c7b1e4b4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -8,9 +8,10 @@ from pandas import DataFrame from abc import abstractmethod from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade import torch.multiprocessing +from stable_baselines3.common.monitor import Monitor import torch as th logger = logging.getLogger(__name__) @@ -26,6 +27,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): super().__init__(config=kwargs['config']) th.set_num_threads(self.freqai_info.get('data_kitchen_thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] + self.train_env: Base5ActionRLEnv = None def train( self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen @@ -65,15 +67,37 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) 
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - model = self.fit_rl(data_dictionary, pair, dk, prices_train, prices_test) + self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test) + + model = self.fit_rl(data_dictionary, dk) logger.info(f"--------------------done training {pair}--------------------") return model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this in their prediction model if they are using a custom MyRLEnv. Otherwise, + leaving this as-is will default to the Base5ActionRLEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params), ".") + else: + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.train_env.reset() + self.eval_env.reset() + @abstractmethod - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): """ Agent customizations and abstract Reinforcement Learning customizations go in here. Abstract method, so this function must be overridden by
- self.update_portfolio_log_returns(action) + # close long + if action == Actions.Long_sell.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) - self._update_profit(action) - step_reward = self._calculate_reward(action) - self.total_reward += step_reward + if action == Actions.Long_sell.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short + # close short + if action == Actions.Short_buy.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long + if action == Actions.Short_buy.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info + return 0. 
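(Illustration, not taken from the patch.) With the reworked _get_observation, each observation is a window of feature rows with two state columns appended, so its shape is (window_size, n_features + 2); the 3-action environment's self.shape is updated the same way in a later patch in this series. A small sketch with hypothetical feature data:

# Sketch of the observation layout produced by the new _get_observation,
# using random placeholder features and hypothetical state values.
import numpy as np
import pandas as pd

window_size, n_features = 10, 4
features_window = pd.DataFrame(np.random.rand(window_size, n_features),
                               columns=[f"feat_{i}" for i in range(n_features)])

state = pd.DataFrame(np.zeros((window_size, 2)),
                     columns=['current_profit_pct', 'position'],
                     index=features_window.index)
state['current_profit_pct'] = 0.012   # hypothetical unrealized profit
state['position'] = 1                 # hypothetical Positions value

observation = pd.concat([features_window, state], axis=1)
assert observation.shape == (window_size, n_features + 2)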
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py index 337e94607..5fa24a599 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py @@ -3,9 +3,7 @@ from typing import Any, Dict # , Tuple import numpy as np # import numpy.typing as npt -# import pandas as pd import torch as th -# from pandas import DataFrame from stable_baselines3.common.monitor import Monitor from typing import Callable from stable_baselines3 import PPO @@ -16,7 +14,6 @@ from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Posi from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from freqtrade.freqai.data_kitchen import FreqaiDataKitchen import gym -from pandas import DataFrame logger = logging.getLogger(__name__) @@ -48,26 +45,15 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - env_id = "train_env" - num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) - - eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i in - range(num_cpu)]) - path = dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -75,7 +61,7 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 512]) - model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, + model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/ppo/tensorboard/", **self.freqai_info['model_training_parameters'] ) @@ -87,10 +73,37 @@ class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') - eval_env.close() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this in their prediction model if they are custom a MyRLEnv. 
Othwerwise + leaving this will default to Base5ActEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + env_id = "train_env" + num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i + in range(num_cpu)]) + + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i + in range(num_cpu)]) + else: + self.train_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.train_env.env_method('reset') + self.eval_env.env_method('reset') + class MyRLEnv(Base3ActionRLEnv): """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py index 3a57142cf..3c4ac6bdb 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py @@ -9,8 +9,7 @@ from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer import numpy as np -from pandas import DataFrame - +import gc from freqtrade.freqai.data_kitchen import FreqaiDataKitchen logger = logging.getLogger(__name__) @@ -21,24 +20,15 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - # environments - train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) - eval_env = Monitor(eval, ".") - eval_env.reset() - path = dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -46,7 +36,7 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128]) - model = TDQN('TMultiInputPolicy', train_env, + model = TDQN('TMultiInputPolicy', self.train_env, tensorboard_log=f"{path}/tdqn/tensorboard/", policy_kwargs=policy_kwargs, replay_buffer_class=ReplayBuffer, @@ -58,12 +48,33 @@ class ReinforcementLearningTDQN(BaseReinforcementLearningModel): callback=eval_callback ) + del model best_model = DQN.load(dk.data_path / "best_model") print('Training finished!') - + gc.collect() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this as shown here if they are using a custom MyRLEnv + """ + train_df = 
data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params), ".") + else: + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.train_env.reset() + self.eval_env.reset() + # User can inherit and customize 5 action environment class MyRLEnv(Base5ActionRLEnv): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py index bf9e03b7f..8634fd958 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py @@ -4,8 +4,8 @@ import torch as th import numpy as np import gym from typing import Callable -from stable_baselines3.common.callbacks import ( - EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold) +from stable_baselines3.common.callbacks import EvalCallback +# EvalCallback , StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import SubprocVecEnv from stable_baselines3.common.utils import set_random_seed @@ -15,7 +15,6 @@ from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcement from freqtrade.freqai.RL.TDQNagent import TDQN from stable_baselines3.common.buffers import ReplayBuffer from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from pandas import DataFrame logger = logging.getLogger(__name__) @@ -47,46 +46,23 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - env_id = "train_env" - num_cpu = int(dk.thread_count / 2) - train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i in range(num_cpu)]) - - eval_env_id = 'eval_env' - eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i in - range(num_cpu)]) - path = dk.data_path - stop_train_callback = StopTrainingOnNoModelImprovement( - max_no_improvement_evals=5, - min_evals=10, - verbose=2 - ) - callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=-200, verbose=2) - eval_callback = EvalCallback( - eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", - eval_freq=int(eval_freq), - deterministic=True, - render=True, - callback_after_eval=stop_train_callback, - callback_on_new_best=callback_on_best, - verbose=2 - ) + + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", + log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), + deterministic=True, render=False) # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 512]) - model = TDQN('TMultiInputPolicy', train_env, + model = TDQN('TMultiInputPolicy', self.train_env, policy_kwargs=policy_kwargs, tensorboard_log=f"{path}/tdqn/tensorboard/", replay_buffer_class=ReplayBuffer, @@ -100,12 +76,40 @@ class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): best_model = DQN.load(dk.data_path / "best_model.zip") print('Training finished!') - eval_env.close() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this in their prediction model if they are custom a MyRLEnv. Othwerwise + leaving this will default to Base5ActEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + env_id = "train_env" + num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i + in range(num_cpu)]) + + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i + in range(num_cpu)]) + else: + self.train_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.train_env.env_method('reset') + self.eval_env.env_method('reset') # User can inherit and customize 5 action environment + + class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. 
Here the user From 4baa36bdcf449e224eaa4c69001bc2c503253988 Mon Sep 17 00:00:00 2001 From: sonnhfit Date: Fri, 19 Aug 2022 01:49:11 +0700 Subject: [PATCH 042/232] fix persist a single training environment for PPO --- config_examples/config_freqai-rl.example.json | 8 +--- freqtrade/freqai/RL/Base3ActionRLEnv.py | 23 ++++++++-- .../ReinforcementLearningPPO.py | 45 ++++++++++++------- 3 files changed, 51 insertions(+), 25 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index ccc977705..1af872552 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -79,13 +79,9 @@ "random_state": 1, "shuffle": false }, - "model_training_parameters": { + "model_training_parameters": { "learning_rate": 0.00025, "gamma": 0.9, - "target_update_interval": 5000, - "buffer_size": 50000, - "exploration_initial_eps":1, - "exploration_final_eps": 0.1, "verbose": 1 }, "rl_config": { @@ -103,4 +99,4 @@ "internals": { "process_throttle_secs": 5 } -} \ No newline at end of file +} diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 9d17b982d..df53c729b 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -1,13 +1,16 @@ import logging from enum import Enum -# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import gym import numpy as np +import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame + +# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union + logger = logging.getLogger(__name__) @@ -43,6 +46,9 @@ class Base3ActionRLEnv(gym.Env): self.id = id self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + + def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): self.df = df self.signal_features = self.df self.prices = prices @@ -54,7 +60,7 @@ class Base3ActionRLEnv(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1]) + self.shape = (window_size, self.signal_features.shape[1] + 2) self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) @@ -165,7 +171,16 @@ class Base3ActionRLEnv(gym.Env): return observation, step_reward, self._done, info def _get_observation(self): - return self.signal_features[(self._current_tick - self.window_size):self._current_tick] + features_window = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state = DataFrame(np.zeros((len(features_window), 2)), + columns=['current_profit_pct', 'position'], + index=features_window.index) + + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state def get_unrealized_profit(self): @@ -307,7 +322,7 @@ class Base3ActionRLEnv(gym.Env): def prev_price(self) -> float: return self.prices.iloc[self._current_tick - 1].open - def sharpe_ratio(self): + def sharpe_ratio(self) -> float: if len(self.close_trade_profit) == 0: return 0. 
returns = np.array(self.close_trade_profit) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py index 5dc7735d3..993ac263b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py @@ -1,16 +1,17 @@ +import gc import logging from typing import Any, Dict # , Tuple import numpy as np # import numpy.typing as npt import torch as th -from pandas import DataFrame from stable_baselines3 import PPO from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base3ActionRLEnv import Actions, Base3ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel logger = logging.getLogger(__name__) @@ -21,23 +22,15 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], pair: str, dk: FreqaiDataKitchen, - prices_train: DataFrame, prices_test: DataFrame): + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - # environments - train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - eval = MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params) - eval_env = Monitor(eval, ".") - path = dk.data_path - eval_callback = EvalCallback(eval_env, best_model_save_path=f"{path}/", + eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), deterministic=True, render=False) @@ -45,8 +38,8 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[256, 256, 128]) - model = PPO('MlpPolicy', train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", learning_rate=0.00025, + model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=f"{path}/ppo/tensorboard/", **self.freqai_info['model_training_parameters'] ) @@ -55,12 +48,34 @@ class ReinforcementLearningPPO(BaseReinforcementLearningModel): callback=eval_callback ) + del model best_model = PPO.load(dk.data_path / "best_model") print('Training finished!') + gc.collect() return best_model + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + """ + User overrides this as shown here if they are using a custom MyRLEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + + # environments + if not self.train_env: + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + 
reward_kwargs=self.reward_params), ".") + else: + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.train_env.reset() + self.eval_env.reset() + class MyRLEnv(Base3ActionRLEnv): """ From 4b9499e321ba107f71db0953ed5718a31b4f8bc1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 19 Aug 2022 11:04:15 +0200 Subject: [PATCH 043/232] improve nomenclature and fix short exit bug --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 52 ++++++++++++------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index bf3f0df33..4c946a5b2 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -13,10 +13,10 @@ logger = logging.getLogger(__name__) class Actions(Enum): Neutral = 0 - Long_buy = 1 - Long_sell = 2 - Short_buy = 3 - Short_sell = 4 + Long_enter = 1 + Long_exit = 2 + Short_enter = 3 + Short_exit = 4 class Positions(Enum): @@ -139,16 +139,16 @@ class Base5ActionRLEnv(gym.Env): if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Long_buy.value: + elif action == Actions.Long_enter.value: self._position = Positions.Long trade_type = "long" - elif action == Actions.Short_buy.value: + elif action == Actions.Short_enter.value: self._position = Positions.Short trade_type = "short" - elif action == Actions.Long_sell.value: + elif action == Actions.Long_exit.value: self._position = Positions.Neutral trade_type = "neutral" - elif action == Actions.Short_sell.value: + elif action == Actions.Short_exit.value: self._position = Positions.Neutral trade_type = "neutral" else: @@ -221,24 +221,24 @@ class Base5ActionRLEnv(gym.Env): return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Short_buy.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Short) or - (action == Actions.Short_sell.value and self._position == Positions.Long) or - (action == Actions.Short_sell.value and self._position == Positions.Neutral) or - (action == Actions.Long_buy.value and self._position == Positions.Long) or - (action == Actions.Long_buy.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Long) or - (action == Actions.Long_sell.value and self._position == Positions.Short) or - (action == Actions.Long_sell.value and self._position == Positions.Neutral)) + (action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Short_enter.value and self._position == Positions.Long) or + (action == Actions.Short_exit.value and self._position == Positions.Short) or + (action == Actions.Short_exit.value and self._position == Positions.Long) or + (action == Actions.Short_exit.value and self._position == Positions.Neutral) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or + (action == Actions.Long_enter.value and self._position == Positions.Short) or + (action == Actions.Long_exit.value and self._position == Positions.Long) or + (action == 
Actions.Long_exit.value and self._position == Positions.Short) or + (action == Actions.Long_exit.value and self._position == Positions.Neutral)) def _is_trade(self, action: Actions): - return ((action == Actions.Long_buy.value and self._position == Positions.Neutral) or - (action == Actions.Short_buy.value and self._position == Positions.Neutral)) + return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or + (action == Actions.Short_enter.value and self._position == Positions.Neutral)) def is_hold(self, action): - return ((action == Actions.Short_buy.value and self._position == Positions.Short) or - (action == Actions.Long_buy.value and self._position == Positions.Long) or + return ((action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Neutral)) @@ -265,7 +265,7 @@ class Base5ActionRLEnv(gym.Env): return 0. # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: + if action == Actions.Long_exit.value and self._position == Positions.Long: if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: @@ -292,7 +292,7 @@ class Base5ActionRLEnv(gym.Env): # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: + if action == Actions.Short_exit.value and self._position == Positions.Short: if len(self.close_trade_profit): # aim x2 rw if self.close_trade_profit[-1] > self.profit_aim * self.rr: @@ -346,7 +346,7 @@ class Base5ActionRLEnv(gym.Env): # Long positions if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short_buy.value or action == Actions.Neutral.value: + if action == Actions.Short_enter.value or action == Actions.Neutral.value: current_price = self.add_sell_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open @@ -360,7 +360,7 @@ class Base5ActionRLEnv(gym.Env): # Short positions if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long_buy.value or action == Actions.Neutral.value: + if action == Actions.Long_enter.value or action == Actions.Neutral.value: current_price = self.add_buy_fee(current_price) previous_price = self.prices.iloc[self._current_tick - 1].open From 3eb897c2f8c89e07f81fbd8675b97a3f7bddab91 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 20 Aug 2022 16:35:29 +0200 Subject: [PATCH 044/232] reuse callback, allow user to acces all stable_baselines3 agents via config --- config_examples/config_freqai-rl.example.json | 9 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 69 +++----- .../RL/BaseReinforcementLearningModel.py | 66 +++++--- freqtrade/freqai/data_drawer.py | 11 +- .../prediction_models/ReinforcementLearner.py | 82 ++++++++++ .../ReinforcementLearnerCustomAgent.py} | 62 ++++++-- .../ReinforcementLearner_multiproc.py | 84 ++++++++++ .../ReinforcementLearningPPO.py | 104 ------------ .../ReinforcementLearningPPO_multiproc.py | 132 ---------------- .../ReinforcementLearningTDQN.py | 115 -------------- .../ReinforcementLearningTDQN_multiproc.py | 148 ------------------ 11 files 
changed, 295 insertions(+), 587 deletions(-) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearner.py rename freqtrade/freqai/{RL/TDQNagent.py => prediction_models/ReinforcementLearnerCustomAgent.py} (81%) create mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py delete mode 100644 freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 1af872552..fa08cdd60 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -55,7 +55,7 @@ ], "freqai": { "enabled": true, - "model_save_type": "stable_baselines_dqn", + "model_save_type": "stable_baselines", "conv_width": 10, "purge_old_models": true, "train_period_days": 10, @@ -85,8 +85,11 @@ "verbose": 1 }, "rl_config": { - "train_cycles": 15, - "eval_cycles": 5, + "train_cycles": 10, + "eval_cycles": 3, + "thread_count": 4, + "model_type": "PPO", + "policy_type": "MlpPolicy", "model_reward_parameters": { "rr": 1, "profit_aim": 0.02 diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 4c946a5b2..7d3cbffbe 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -266,59 +266,28 @@ class Base5ActionRLEnv(gym.Env): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - if len(self.close_trade_profit): - # aim x2 rw - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee( - self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee( - self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - # less than aim x1 rw - elif self.close_trade_profit[-1] < self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee( - self.prices.iloc[self._last_trade_tick].open - ) - current_price = self.add_sell_fee( - self.prices.iloc[self._current_tick].open - ) - return float(np.log(current_price) - np.log(last_trade_price)) - # # less than RR SL x2 neg rw - # elif self.close_trade_profit[-1] < (self.profit_aim * -1): - # last_trade_price = self.add_buy_fee( - # self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_sell_fee( - # self.prices.iloc[self._current_tick].open) - # return float((np.log(current_price) - np.log(last_trade_price)) * 2) * -1 + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_exit.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - if len(self.close_trade_profit): - # aim x2 rw - if self.close_trade_profit[-1] > 
self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee( - self.prices.iloc[self._last_trade_tick].open - ) - current_price = self.add_buy_fee( - self.prices.iloc[self._current_tick].open - ) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - # less than aim x1 rw - elif self.close_trade_profit[-1] < self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee( - self.prices.iloc[self._last_trade_tick].open - ) - current_price = self.add_buy_fee( - self.prices.iloc[self._current_tick].open - ) - return float(np.log(last_trade_price) - np.log(current_price)) - # # less than RR SL x2 neg rw - # elif self.close_trade_profit[-1] > self.profit_aim * self.rr: - # last_trade_price = self.add_sell_fee( - # self.prices.iloc[self._last_trade_tick].open) - # current_price = self.add_buy_fee( - # self.prices.iloc[self._current_tick].open) - # return float((np.log(last_trade_price) - np.log(current_price)) * 2) * -1 + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_exit.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + return 0. def _update_profit(self, action): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 9c7b1e4b4..9cada2bf0 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -11,8 +11,12 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions from freqtrade.persistence import Trade import torch.multiprocessing +from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor import torch as th +from typing import Callable +from stable_baselines3.common.utils import set_random_seed +import gym logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -25,9 +29,15 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - th.set_num_threads(self.freqai_info.get('data_kitchen_thread_count', 4)) + th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Base5ActionRLEnv = None + self.eval_env: Base5ActionRLEnv = None + self.eval_callback: EvalCallback = None + mod = __import__('stable_baselines3', fromlist=[ + self.freqai_info['rl_config']['model_type']]) + self.MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) + self.policy_type = self.freqai_info['rl_config']['policy_type'] def train( self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen @@ -67,7 +77,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') - self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test) + 
self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk) model = self.fit_rl(data_dictionary, dk) @@ -75,13 +85,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): """ - User overrides this in their prediction model if they are custom a MyRLEnv. Othwerwise - leaving this will default to Base5ActEnv + User overrides this as shown here if they are using a custom MyRLEnv """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) # environments if not self.train_env: @@ -90,11 +100,17 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params), ".") + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) else: - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) self.train_env.reset() self.eval_env.reset() + self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) + self.eval_env.reset_env(test_df, prices_test, self.CONV_WIDTH, self.reward_params) + self.eval_callback.__init__(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) @abstractmethod def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -206,16 +222,28 @@ class BaseReinforcementLearningModel(IFreqaiModel): # all the other existing fit() functions to include dk argument. For now we instantiate and # leave it. def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: - """ - Most regressors use the same function names and arguments e.g. user - can drop in LGBMRegressor in place of CatBoostRegressor and all data - management will be properly handled by Freqai. - :param data_dictionary: Dict = the dictionary constructed by DataHandler to hold - all the training and test data/labels. - """ - return +def make_env(env_id: str, rank: int, seed: int, train_df, price, + reward_params, window_size, monitor=False) -> Callable: + """ + Utility function for multiprocessed env. + + :param env_id: (str) the environment ID + :param num_env: (int) the number of environment you wish to have in subprocesses + :param seed: (int) the inital seed for RNG + :param rank: (int) index of the subprocess + :return: (Callable) + """ + def _init() -> gym.Env: + + env = MyRLEnv(df=train_df, prices=price, window_size=window_size, + reward_kwargs=reward_params, id=env_id, seed=seed + rank) + if monitor: + env = Monitor(env, ".") + return env + set_random_seed(seed) + return _init class MyRLEnv(Base5ActionRLEnv): """ @@ -229,24 +257,24 @@ class MyRLEnv(Base5ActionRLEnv): return 0. 
# close long - if action == Actions.Long_sell.value and self._position == Positions.Long: + if action == Actions.Long_exit.value and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float(np.log(current_price) - np.log(last_trade_price)) - if action == Actions.Long_sell.value and self._position == Positions.Long: + if action == Actions.Long_exit.value and self._position == Positions.Long: if self.close_trade_profit[-1] > self.profit_aim * self.rr: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float((np.log(current_price) - np.log(last_trade_price)) * 2) # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: + if action == Actions.Short_exit.value and self._position == Positions.Short: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) return float(np.log(last_trade_price) - np.log(current_price)) - if action == Actions.Short_buy.value and self._position == Positions.Short: + if action == Actions.Short_exit.value and self._position == Positions.Short: if self.close_trade_profit[-1] > self.profit_aim * self.rr: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 9603fb9ab..c37973551 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -471,12 +471,11 @@ class FreqaiDataDrawer: elif model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines_ppo': - from stable_baselines3.ppo.ppo import PPO - model = PPO.load(dk.data_path / f"{dk.model_filename}_model") - elif model_type == 'stable_baselines_dqn': - from stable_baselines3 import DQN - model = DQN.load(dk.data_path / f"{dk.model_filename}_model") + elif model_type == 'stable_baselines': + mod = __import__('stable_baselines3', fromlist=[ + self.freqai_info['rl_config']['model_type']]) + MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) + model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py new file mode 100644 index 000000000..2faa6eb3a --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -0,0 +1,82 @@ +import logging +from typing import Any, Dict # , Tuple + +# import numpy.typing as npt +import torch as th +import numpy as np +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from pathlib import Path + +logger = logging.getLogger(__name__) + + +class ReinforcementLearner(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. 
+ """ + + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.data_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Could not find best model, using final model instead.') + + return model + + +class MyRLEnv(Base5ActionRLEnv): + """ + User can modify any part of the environment by overriding base + functions + """ + def calculate_reward(self, action): + + if self._last_trade_tick is None: + return 0. + + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(current_price) - np.log(last_trade_price)) + + if action == Actions.Long_exit.value and self._position == Positions.Long: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(current_price) - np.log(last_trade_price)) * 2) + + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float(np.log(last_trade_price) - np.log(current_price)) + + if action == Actions.Short_exit.value and self._position == Positions.Short: + if self.close_trade_profit[-1] > self.profit_aim * self.rr: + last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) + return float((np.log(last_trade_price) - np.log(current_price)) * 2) + + return 0.
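(Illustration, not part of the patch.) The new ReinforcementLearner above relies on self.MODELCLASS and self.policy_type, which the base class resolves from rl_config; the model_type string is looked up directly on stable_baselines3, so values such as PPO, A2C or DQN work without code changes. A minimal sketch with example config values:

# Sketch of the dynamic agent lookup, assuming stable_baselines3 is installed
# and rl_config["model_type"] names one of its agents (PPO, A2C, DQN, ...).
import importlib

rl_config = {"model_type": "PPO", "policy_type": "MlpPolicy"}   # example values

sb3 = importlib.import_module("stable_baselines3")
MODELCLASS = getattr(sb3, rl_config["model_type"])              # e.g. stable_baselines3.PPO

# a model would then be built roughly as:
# model = MODELCLASS(rl_config["policy_type"], train_env, **model_training_parameters)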
diff --git a/freqtrade/freqai/RL/TDQNagent.py b/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py similarity index 81% rename from freqtrade/freqai/RL/TDQNagent.py rename to freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py index 584f6a8ef..bb16b612b 100644 --- a/freqtrade/freqai/RL/TDQNagent.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py @@ -1,17 +1,59 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import torch +import logging import torch as th +from typing import Any, Dict, List, Optional, Tuple, Type, Union +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3.common.policies import BasePolicy -from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, - FlattenExtractor) -from stable_baselines3.common.type_aliases import GymEnv, Schedule +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from pathlib import Path from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, QNetwork) from torch import nn +import gym +from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, + FlattenExtractor) +from stable_baselines3.common.type_aliases import GymEnv, Schedule +from stable_baselines3.common.policies import BasePolicy + +logger = logging.getLogger(__name__) + + +class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): + """ + User can customize agent by defining the class and using it directly. + Here the example is "TDQN" + """ + + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[256, 256, 128]) + + # TDQN is a custom agent defined below + model = TDQN(self.policy_type, self.train_env, + tensorboard_log=Path(dk.data_path / "tensorboard"), + policy_kwargs=policy_kwargs, + **self.freqai_info['model_training_parameters'] + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Couldnt find best model, using final model instead.') + + return model + +# User creates their custom agent and networks as shown below def create_mlp_( @@ -72,7 +114,7 @@ class TDQNetwork(QNetwork): def init_weights(self, m): if type(m) == nn.Linear: - torch.nn.init.kaiming_uniform_(m.weight) + th.nn.init.kaiming_uniform_(m.weight) class TDQNPolicy(DQNPolicy): @@ -175,7 +217,7 @@ class TDQN(DQN): exploration_initial_eps: float = 1.0, exploration_final_eps: float = 0.05, max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, + tensorboard_log: Optional[Path] = None, create_eval_env: bool = False, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 1, diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py new file mode 100644 index 000000000..1854bb1a5 --- /dev/null +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -0,0 +1,84 @@ +import logging +from typing import Any, Dict # , Tuple + 
+# import numpy.typing as npt +import torch as th +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, + make_env) +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen + +from pathlib import Path + +logger = logging.getLogger(__name__) + + +class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + # model arch + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[512, 512, 512]) + + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.data_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Couldnt find best model, using final model instead.') + + return model + + def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): + """ + If user has particular environment configuration needs, they can do that by + overriding this function. In the present case, the user wants to setup training + environments for multiple workers. + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + + # environments + if not self.train_env: + env_id = "train_env" + num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH) for i + in range(num_cpu)]) + + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True) for i + in range(num_cpu)]) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) + else: + self.train_env.env_method('reset') + self.eval_env.env_method('reset') + self.train_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_env.env_method('reset_env', train_df, prices_train, + self.CONV_WIDTH, self.reward_params) + self.eval_callback.__init__(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py deleted file mode 100644 index 993ac263b..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO.py +++ /dev/null @@ -1,104 +0,0 @@ -import gc -import logging -from typing import Any, Dict # , Tuple - -import numpy as np -# import numpy.typing as npt -import torch as th -from stable_baselines3 import PPO -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor - -from 
freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.Base3ActionRLEnv import Actions, Base3ActionRLEnv, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel - - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningPPO(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) - - model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - del model - best_model = PPO.load(dk.data_path / "best_model") - - print('Training finished!') - gc.collect() - - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this as shown here if they are using a custom MyRLEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params), ".") - else: - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.train_env.reset() - self.eval_env.reset() - - -class MyRLEnv(Base3ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. 
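The reward used in the removed PPO-specific environment (and carried over into the consolidated environments) is a fee-adjusted log return: the entry price is made more expensive by the fee, the exit price cheaper. A standalone sketch of that arithmetic, with an assumed fee value for illustration:

    import numpy as np

    FEE = 0.0015  # assumption for illustration; the env keeps its own fee attribute

    def add_buy_fee(price: float) -> float:
        return price * (1 + FEE)

    def add_sell_fee(price: float) -> float:
        return price / (1 + FEE)

    def long_close_reward(entry_open: float, current_open: float) -> float:
        """Log return of closing a long, with the fee charged on both legs."""
        return float(np.log(add_sell_fee(current_open)) - np.log(add_buy_fee(entry_open)))

    # entering at 100.0 and exiting at 102.0 yields log(1.02) minus twice log(1 + fee)
    print(long_close_reward(100.0, 102.0))  # ~0.0168
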
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py deleted file mode 100644 index 5fa24a599..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningPPO_multiproc.py +++ /dev/null @@ -1,132 +0,0 @@ -import logging -from typing import Any, Dict # , Tuple - -import numpy as np -# import numpy.typing as npt -import torch as th -from stable_baselines3.common.monitor import Monitor -from typing import Callable -from stable_baselines3 import PPO -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.utils import set_random_seed -from freqtrade.freqai.RL.Base3ActionRLEnv import Base3ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -import gym - -logger = logging.getLogger(__name__) - - -def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False) -> Callable: - """ - Utility function for multiprocessed env. - - :param env_id: (str) the environment ID - :param num_env: (int) the number of environment you wish to have in subprocesses - :param seed: (int) the inital seed for RNG - :param rank: (int) index of the subprocess - :return: (Callable) - """ - def _init() -> gym.Env: - - env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank) - if monitor: - env = Monitor(env, ".") - return env - set_random_seed(seed) - return _init - - -class ReinforcementLearningPPO_multiproc(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. - """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/ppo/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 512]) - - model = PPO('MlpPolicy', self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/ppo/tensorboard/", - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - best_model = PPO.load(dk.data_path / "best_model") - print('Training finished!') - - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this in their prediction model if they are custom a MyRLEnv. 
Othwerwise - leaving this will default to Base5ActEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - env_id = "train_env" - num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i - in range(num_cpu)]) - - eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i - in range(num_cpu)]) - else: - self.train_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.eval_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.train_env.env_method('reset') - self.eval_env.env_method('reset') - - -class MyRLEnv(Base3ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py deleted file mode 100644 index 3c4ac6bdb..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py +++ /dev/null @@ -1,115 +0,0 @@ -import logging -from typing import Any, Dict # Optional -import torch as th -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor -from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from freqtrade.freqai.RL.TDQNagent import TDQN -from stable_baselines3 import DQN -from stable_baselines3.common.buffers import ReplayBuffer -import numpy as np -import gc -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningTDQN(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. 
- """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) - - model = TDQN('TMultiInputPolicy', self.train_env, - tensorboard_log=f"{path}/tdqn/tensorboard/", - policy_kwargs=policy_kwargs, - replay_buffer_class=ReplayBuffer, - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - del model - best_model = DQN.load(dk.data_path / "best_model") - - print('Training finished!') - gc.collect() - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this as shown here if they are using a custom MyRLEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params), ".") - else: - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.train_env.reset() - self.eval_env.reset() - - -# User can inherit and customize 5 action environment -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. 
- - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py deleted file mode 100644 index 8634fd958..000000000 --- a/freqtrade/freqai/prediction_models/ReinforcementLearningTDQN_multiproc.py +++ /dev/null @@ -1,148 +0,0 @@ -import logging -from typing import Any, Dict # Optional -import torch as th -import numpy as np -import gym -from typing import Callable -from stable_baselines3.common.callbacks import EvalCallback -# EvalCallback , StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold -from stable_baselines3.common.monitor import Monitor -from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.utils import set_random_seed -from stable_baselines3 import DQN -from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from freqtrade.freqai.RL.TDQNagent import TDQN -from stable_baselines3.common.buffers import ReplayBuffer -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen - -logger = logging.getLogger(__name__) - - -def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False) -> Callable: - """ - Utility function for multiprocessed env. - - :param env_id: (str) the environment ID - :param num_env: (int) the number of environment you wish to have in subprocesses - :param seed: (int) the inital seed for RNG - :param rank: (int) index of the subprocess - :return: (Callable) - """ - def _init() -> gym.Env: - - env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank) - if monitor: - env = Monitor(env, ".") - return env - set_random_seed(seed) - return _init - - -class ReinforcementLearningTDQN_multiproc(BaseReinforcementLearningModel): - """ - User created Reinforcement Learning Model prediction model. 
- """ - - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - path = dk.data_path - - eval_callback = EvalCallback(self.eval_env, best_model_save_path=f"{path}/", - log_path=f"{path}/tdqn/logs/", eval_freq=int(eval_freq), - deterministic=True, render=False) - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 512]) - - model = TDQN('TMultiInputPolicy', self.train_env, - policy_kwargs=policy_kwargs, - tensorboard_log=f"{path}/tdqn/tensorboard/", - replay_buffer_class=ReplayBuffer, - **self.freqai_info['model_training_parameters'] - ) - - model.learn( - total_timesteps=int(total_timesteps), - callback=eval_callback - ) - - best_model = DQN.load(dk.data_path / "best_model.zip") - print('Training finished!') - - return best_model - - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test): - """ - User overrides this in their prediction model if they are custom a MyRLEnv. Othwerwise - leaving this will default to Base5ActEnv - """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - - # environments - if not self.train_env: - env_id = "train_env" - num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i - in range(num_cpu)]) - - eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i - in range(num_cpu)]) - else: - self.train_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.eval_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.train_env.env_method('reset') - self.eval_env.env_method('reset') - -# User can inherit and customize 5 action environment - - -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. - """ - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. 
- - # close long - if action == Actions.Long_sell.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_sell.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) - - # close short - if action == Actions.Short_buy.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_buy.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) - - return 0. From 8cd4daad0ad1e511f95c7881d18713a54897567a Mon Sep 17 00:00:00 2001 From: mrzdev <106373816+mrzdev@users.noreply.github.com> Date: Sun, 21 Aug 2022 17:43:40 +0200 Subject: [PATCH 045/232] Feat/freqai rl dev (#7) * access trades through get_trades_proxy method to allow backtesting --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 9cada2bf0..a0d5425d3 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -123,7 +123,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return def get_state_info(self, pair): - open_trades = Trade.get_trades(trade_filter=Trade.is_open.is_(True)) + open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 current_profit = 0 for trade in open_trades: @@ -137,8 +137,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = current_value / openrate - 1 total_profit = 0 - closed_trades = Trade.get_trades( - trade_filter=[Trade.is_open.is_(False), Trade.pair == pair]) + closed_trades = Trade.get_trades_proxy(pair = pair, is_open=False) for trade in closed_trades: total_profit += trade.close_profit From 8b3a8234ac96d91b9544005df8d7b5983134ea1a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 21 Aug 2022 19:43:39 +0200 Subject: [PATCH 046/232] fix env bug, allow example strat to short --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 56 ++++++++----------- .../ReinforcementLearningExample5ac.py | 2 +- 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 7d3cbffbe..b2aeef73b 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -200,12 +200,12 @@ class Base5ActionRLEnv(gym.Env): if self._position == Positions.Neutral: return 0. 
elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) return (last_trade_price - current_price) / last_trade_price elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) return (current_price - last_trade_price) / last_trade_price else: return 0. @@ -223,12 +223,12 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_enter.value and self._position == Positions.Short) or (action == Actions.Short_enter.value and self._position == Positions.Long) or - (action == Actions.Short_exit.value and self._position == Positions.Short) or + # (action == Actions.Short_exit.value and self._position == Positions.Short) or (action == Actions.Short_exit.value and self._position == Positions.Long) or (action == Actions.Short_exit.value and self._position == Positions.Neutral) or (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short) or - (action == Actions.Long_exit.value and self._position == Positions.Long) or + # (action == Actions.Long_exit.value and self._position == Positions.Long) or (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) @@ -243,10 +243,10 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Neutral)) - def add_buy_fee(self, price): + def add_entry_fee(self, price): return price * (1 + self.fee) - def add_sell_fee(self, price): + def add_exit_fee(self, price): return price / (1 + self.fee) def _update_history(self, info): @@ -266,27 +266,21 @@ class Base5ActionRLEnv(gym.Env): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == 
Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_exit.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. @@ -315,27 +309,21 @@ class Base5ActionRLEnv(gym.Env): # Long positions if self._position == Positions.Long: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short_enter.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - previous_price = self.prices.iloc[self._current_tick - 1].open if (self._position_history[self._current_tick - 1] == Positions.Short or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) + previous_price = self.add_entry_fee(previous_price) return np.log(current_price) - np.log(previous_price) # Short positions if self._position == Positions.Short: current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long_enter.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - previous_price = self.prices.iloc[self._current_tick - 1].open if (self._position_history[self._current_tick - 1] == Positions.Long or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) + previous_price = self.add_exit_fee(previous_price) return np.log(previous_price) - np.log(current_price) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 70727f6db..437b53b05 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -36,7 +36,7 @@ class ReinforcementLearningExample5ac(IStrategy): stoploss = -0.05 use_exit_signal = True startup_candle_count: int = 300 - can_short = False + can_short = True linear_roi_offset = DecimalParameter( 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True From d88a0dbf82bd180e66b53cca2bc0781179de42a9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 21 Aug 2022 19:58:36 +0200 Subject: [PATCH 047/232] add sb3_contrib models to the available agents. include sb3_contrib in requirements. 
--- freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 - .../RL/BaseReinforcementLearningModel.py | 54 +++++++++++-------- requirements-freqai.txt | 4 +- 3 files changed, 35 insertions(+), 25 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index b2aeef73b..94de259a9 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -223,12 +223,10 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_enter.value and self._position == Positions.Short) or (action == Actions.Short_enter.value and self._position == Positions.Long) or - # (action == Actions.Short_exit.value and self._position == Positions.Short) or (action == Actions.Short_exit.value and self._position == Positions.Long) or (action == Actions.Short_exit.value and self._position == Positions.Neutral) or (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short) or - # (action == Actions.Long_exit.value and self._position == Positions.Long) or (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a0d5425d3..bb858f3cf 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -6,6 +6,7 @@ import numpy.typing as npt import pandas as pd from pandas import DataFrame from abc import abstractmethod +from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions @@ -21,6 +22,9 @@ logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') +SB3_MODELS = ['PPO', 'A2C', 'DQN', 'TD3', 'SAC'] +SB3_CONTRIB_MODELS = ['TRPO', 'ARS'] + class BaseReinforcementLearningModel(IFreqaiModel): """ @@ -34,9 +38,19 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.train_env: Base5ActionRLEnv = None self.eval_env: Base5ActionRLEnv = None self.eval_callback: EvalCallback = None - mod = __import__('stable_baselines3', fromlist=[ - self.freqai_info['rl_config']['model_type']]) - self.MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) + self.model_type = self.freqai_info['rl_config']['model_type'] + if self.model_type in SB3_MODELS: + import_str = 'stable_baselines3' + elif self.model_type in SB3_CONTRIB_MODELS: + import_str = 'sb3_contrib' + else: + raise OperationalException(f'{self.model_type} not available in stable_baselines3 or ' + f'sb3_contrib. 
please choose one of {SB3_MODELS} or ' + f'{SB3_CONTRIB_MODELS}') + + mod = __import__(import_str, fromlist=[ + self.model_type]) + self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] def train( @@ -137,7 +151,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = current_value / openrate - 1 total_profit = 0 - closed_trades = Trade.get_trades_proxy(pair = pair, is_open=False) + closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) for trade in closed_trades: total_profit += trade.close_profit @@ -223,6 +237,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: return + def make_env(env_id: str, rank: int, seed: int, train_df, price, reward_params, window_size, monitor=False) -> Callable: """ @@ -244,6 +259,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, set_random_seed(seed) return _init + class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. Here the user @@ -257,26 +273,20 @@ class MyRLEnv(Base5ActionRLEnv): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_exit.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = 2 + return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
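The SB3_MODELS / SB3_CONTRIB_MODELS split above resolves the agent class at runtime from either stable_baselines3 or sb3_contrib, based on rl_config["model_type"]. A minimal standalone sketch of that lookup, using importlib.import_module as the equivalent of the __import__ call in the patch:

    import importlib

    from freqtrade.exceptions import OperationalException

    SB3_MODELS = ['PPO', 'A2C', 'DQN', 'TD3', 'SAC']
    SB3_CONTRIB_MODELS = ['TRPO', 'ARS']

    def resolve_model_class(model_type: str):
        """Return the agent class named by rl_config['model_type']."""
        if model_type in SB3_MODELS:
            import_str = 'stable_baselines3'
        elif model_type in SB3_CONTRIB_MODELS:
            import_str = 'sb3_contrib'
        else:
            raise OperationalException(
                f'{model_type} not available in stable_baselines3 or sb3_contrib.')
        return getattr(importlib.import_module(import_str), model_type)

    # e.g. resolve_model_class('PPO').load(path) mirrors the loading done in data_drawer
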
diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 6000f8e0f..de1b6670a 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -9,4 +9,6 @@ lightgbm==3.3.2 torch==1.12.1 stable-baselines3==1.6.0 gym==0.21.0 -tensorboard==2.9.1 \ No newline at end of file +tensorboard==2.9.1 +optuna==2.10.1 +sb3-contrib==1.6.0 \ No newline at end of file From 29f0e01c4a50e7b955a100ef49b47049eff3737a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 21 Aug 2022 20:33:09 +0200 Subject: [PATCH 048/232] expose environment reward parameters to the user config --- config_examples/config_freqai-rl.example.json | 3 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 7 +++-- .../RL/BaseReinforcementLearningModel.py | 16 +++++----- .../prediction_models/ReinforcementLearner.py | 30 ++++++++----------- .../ReinforcementLearner_multiproc.py | 4 +-- 5 files changed, 28 insertions(+), 32 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index fa08cdd60..07ddb04d3 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -92,7 +92,8 @@ "policy_type": "MlpPolicy", "model_reward_parameters": { "rr": 1, - "profit_aim": 0.02 + "profit_aim": 0.02, + "win_reward_factor": 2 } } }, diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 94de259a9..84a82c5de 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -42,9 +42,10 @@ class Base5ActionRLEnv(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1): + id: str = 'baseenv-1', seed: int = 1, config: dict = {}): assert df.ndim == 2 + self.rl_config = config['freqai']['rl_config'] self.id = id self.seed(seed) self.reset_env(df, prices, window_size, reward_kwargs, starting_point) @@ -268,7 +269,7 @@ class Base5ActionRLEnv(gym.Env): current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short @@ -277,7 +278,7 @@ class Base5ActionRLEnv(gym.Env): current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
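With win_reward_factor now exposed through model_reward_parameters, the close branches above reduce to scaling the log return whenever the last recorded trade profit beat profit_aim * rr. A standalone sketch of that scaling (parameter names from the example config, values illustrative):

    import numpy as np

    def scaled_close_reward(last_trade_price: float, current_price: float,
                            last_profit: float, profit_aim: float, rr: float,
                            reward_params: dict) -> float:
        """Log return of a closed long, boosted when the profit target was beaten."""
        factor = 1.0
        if last_profit > profit_aim * rr:
            factor = reward_params.get('win_reward_factor', 2)
        return float((np.log(current_price) - np.log(last_trade_price)) * factor)

    # with profit_aim=0.02, rr=1 and a 3% winner, the reward is doubled:
    print(scaled_close_reward(100.0, 103.0, 0.03, 0.02, 1, {"win_reward_factor": 2}))  # ~0.0591
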
diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index bb858f3cf..0618a91ed 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -110,10 +110,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): # environments if not self.train_env: self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params) + reward_kwargs=self.reward_params, config=self.config) self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params), ".") + reward_kwargs=self.reward_params, config=self.config), ".") self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, best_model_save_path=dk.data_path) @@ -239,7 +239,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False) -> Callable: + reward_params, window_size, monitor=False, config={}) -> Callable: """ Utility function for multiprocessed env. @@ -252,7 +252,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, def _init() -> gym.Env: env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank) + reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config) if monitor: env = Monitor(env, ".") return env @@ -277,16 +277,16 @@ class MyRLEnv(Base5ActionRLEnv): current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = 2 + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
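Because the environment now receives the full config (so it can read rl_config internally), every place that builds an env has to forward it, including the multiprocessed path through make_env(). A usage sketch under the assumption of a fixed worker count (the patch itself derives num_cpu from data_kitchen_thread_count):

    from stable_baselines3.common.vec_env import SubprocVecEnv

    from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env

    def build_train_env(train_df, prices_train, reward_params, window_size, config, num_cpu=4):
        """Sketch: build a vectorized training env, forwarding the full freqtrade config."""
        return SubprocVecEnv([
            make_env("train_env", i, 1, train_df, prices_train,
                     reward_params, window_size, config=config)
            for i in range(num_cpu)
        ])
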
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2faa6eb3a..5f22971e1 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -57,26 +57,20 @@ class MyRLEnv(Base5ActionRLEnv): # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(current_price) - np.log(last_trade_price)) * 2) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float((np.log(current_price) - np.log(last_trade_price)) * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - if action == Actions.Short_exit.value and self._position == Positions.Short: - if self.close_trade_profit[-1] > self.profit_aim * self.rr: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float((np.log(last_trade_price) - np.log(current_price)) * 2) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + factor = 1 + if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(np.log(last_trade_price) - np.log(current_price) * factor) return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 1854bb1a5..ee9a407c9 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -62,12 +62,12 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH) for i + self.reward_params, self.CONV_WIDTH, config=self.config) for i in range(num_cpu)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True) for i + self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, From a2a4bc05dbcd56b94bf87393e78864dde2d5d916 Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Mon, 22 Aug 2022 18:06:33 +0200 Subject: [PATCH 049/232] Fix the state profit calculation logic --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 0618a91ed..a9f406c9d 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -142,13 +142,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = 0 for trade in open_trades: if trade.pair == pair: - current_value = trade.open_trade_value + current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) openrate = trade.open_rate if 'long' in trade.enter_tag: market_side = 1 + current_profit = (current_value - openrate) / openrate else: market_side = 0 - current_profit = current_value / openrate - 1 + current_profit = (openrate - current_value ) / openrate total_profit = 0 closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) From f9a49744e6a9c4db7e74d9437c8da4e527adaddd Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 22 Aug 2022 19:15:56 +0200 Subject: [PATCH 050/232] add strategy to the freqai object --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- freqtrade/freqai/freqai_interface.py | 3 ++- freqtrade/strategy/interface.py | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a9f406c9d..360cbf9d4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -142,7 +142,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): current_profit = 0 for trade in open_trades: if trade.pair == pair: - current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) + current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) #, side="buy", is_short=True) openrate = trade.open_rate if 'long' in trade.enter_tag: market_side = 1 diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index b6fde9357..21b79e003 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -7,7 +7,7 @@ import 
time from abc import ABC, abstractmethod from pathlib import Path from threading import Lock -from typing import Any, Dict, Tuple +from typing import Any, Dict, Tuple, Optional import numpy as np import pandas as pd @@ -90,6 +90,7 @@ class IFreqaiModel(ABC): self.begin_time: float = 0 self.begin_time_train: float = 0 self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe']) + self.strategy: Optional[IStrategy] = None def assert_config(self, config: Dict[str, Any]) -> None: diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 79dbd4c69..fe301eb30 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -152,6 +152,7 @@ class IStrategy(ABC, HyperStrategyMixin): self.freqai = FreqaiModelResolver.load_freqaimodel(self.config) self.freqai_info = self.config["freqai"] + self.freqai.strategy = self else: # Gracious failures if freqAI is disabled but "start" is called. class DummyClass(): From 280a1dc3f87f451cf2d8367d910ce1cf01f95d3d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 23 Aug 2022 09:44:44 +0200 Subject: [PATCH 051/232] add live rate, add trade duration --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 13 ++++++++--- .../RL/BaseReinforcementLearningModel.py | 22 ++++++++++++------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 84a82c5de..2b1c4f975 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -62,7 +62,7 @@ class Base5ActionRLEnv(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 2) + self.shape = (window_size, self.signal_features.shape[1] + 3) self.action_space = spaces.Discrete(len(Actions)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) @@ -184,15 +184,22 @@ class Base5ActionRLEnv(gym.Env): def _get_observation(self): features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 2)), - columns=['current_profit_pct', 'position'], + features_and_state = DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', 'position', 'trade_duration'], index=features_window.index) features_and_state['current_profit_pct'] = self.get_unrealized_profit() features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() features_and_state = pd.concat([features_window, features_and_state], axis=1) return features_and_state + def get_trade_duration(self): + if self._last_trade_tick is None: + return 0 + else: + return self._current_tick - self._last_trade_tick + def get_unrealized_profit(self): if self._last_trade_tick is None: diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 360cbf9d4..6a15b96f9 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -16,6 +16,7 @@ from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor import torch as th from typing import Callable +from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym logger = logging.getLogger(__name__) @@ -140,23 +141,27 @@ class BaseReinforcementLearningModel(IFreqaiModel): 
open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 current_profit = 0 + trade_duration = 0 for trade in open_trades: if trade.pair == pair: - current_value = self.strategy.dp._exchange.get_rate(pair, refresh=False) #, side="buy", is_short=True) + current_value = self.strategy.dp._exchange.get_rate( + pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate + now = datetime.now(timezone.utc).timestamp() + trade_duration = (now - trade.open_date.timestamp()) / self.base_tf_seconds if 'long' in trade.enter_tag: market_side = 1 current_profit = (current_value - openrate) / openrate else: market_side = 0 - current_profit = (openrate - current_value ) / openrate + current_profit = (openrate - current_value) / openrate - total_profit = 0 - closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) - for trade in closed_trades: - total_profit += trade.close_profit + # total_profit = 0 + # closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) + # for trade in closed_trades: + # total_profit += trade.close_profit - return market_side, current_profit, total_profit + return market_side, current_profit, int(trade_duration) def predict( self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False @@ -192,10 +197,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): - market_side, current_profit, total_profit = self.get_state_info(dk.pair) + market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] observations['current_profit'] = current_profit observations['position'] = market_side + observations['trade_duration'] = trade_duration res, _ = model.predict(observations, deterministic=True) return res From b26ed7dea4564d55b112cc50ce96e08983913bf2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 23 Aug 2022 14:58:38 +0200 Subject: [PATCH 052/232] fix generic reward, add time duration to reward --- config_examples/config_freqai-rl.example.json | 1 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 27 ++++------------- .../RL/BaseReinforcementLearningModel.py | 25 +++++++++------- .../prediction_models/ReinforcementLearner.py | 29 +++++++++++-------- .../ReinforcementLearner_multiproc.py | 6 ++-- 5 files changed, 43 insertions(+), 45 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 07ddb04d3..bb67b44b6 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -88,6 +88,7 @@ "train_cycles": 10, "eval_cycles": 3, "thread_count": 4, + "max_trade_duration_candles": 100, "model_type": "PPO", "policy_type": "MlpPolicy", "model_reward_parameters": { diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 2b1c4f975..a14111495 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -8,6 +8,7 @@ from gym import spaces from gym.utils import seeding from pandas import DataFrame import pandas as pd +from abc import abstractmethod logger = logging.getLogger(__name__) @@ -265,28 +266,12 @@ class Base5ActionRLEnv(gym.Env): def get_sharpe_ratio(self): return mean_over_std(self.get_portfolio_log_returns()) + @abstractmethod def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. 
- - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float((np.log(current_price) - np.log(last_trade_price)) * factor) - - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(np.log(last_trade_price) - np.log(current_price) * factor) + """ + Reward is created by BaseReinforcementLearningModel and can + be inherited/edited by the user made ReinforcementLearner file. + """ return 0. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 6a15b96f9..a9a1377a8 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -270,7 +270,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df, price, class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. Here the user - Adds 5 actions. + sets a custom reward based on profit and trade duration. """ def calculate_reward(self, action): @@ -278,22 +278,27 @@ class MyRLEnv(Base5ActionRLEnv): if self._last_trade_tick is None: return 0. + pnl = self.get_unrealized_profit() + max_trade_duration = self.rl_config['max_trade_duration_candles'] + trade_duration = self._current_tick - self._last_trade_tick + + factor = 1 + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float((np.log(current_price) - np.log(last_trade_price)) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(np.log(last_trade_price) - np.log(current_price) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) return 0. 
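[editor's note] For reference, a minimal self-contained sketch of the duration-weighted reward shaping introduced in this hunk, written as a pure function so it can be inspected outside the environment. The function name is illustrative, and the win bonus is gated on the unrealized `pnl` for brevity (the patch gates it on the last closed trade profit):

```python
# Illustrative restatement of the reward shaping above; not part of the patch.
def duration_scaled_reward(pnl: float, trade_duration: int,
                           max_trade_duration: int = 100,
                           profit_aim: float = 0.02, rr: float = 1.0,
                           win_reward_factor: float = 2.0) -> float:
    # trades closed within the allowed duration are boosted, slow trades dampened
    factor = 1.5 if trade_duration <= max_trade_duration else 0.5
    # winning trades that beat the profit target get an extra bonus
    if pnl > profit_aim * rr:
        factor *= win_reward_factor
    return pnl * factor


print(duration_scaled_reward(0.03, trade_duration=50))   # fast win -> boosted (factor 3.0)
print(duration_scaled_reward(0.01, trade_duration=500))  # overly long trade -> dampened (factor 0.5)
```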
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 5f22971e1..d3e6bde7c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -3,7 +3,6 @@ from typing import Any, Dict # , Tuple # import numpy.typing as npt import torch as th -import numpy as np from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel @@ -47,30 +46,36 @@ class ReinforcementLearner(BaseReinforcementLearningModel): class MyRLEnv(Base5ActionRLEnv): """ - User can modify any part of the environment by overriding base - functions + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. """ + def calculate_reward(self, action): if self._last_trade_tick is None: return 0. + pnl = self.get_unrealized_profit() + max_trade_duration = self.rl_config['max_trade_duration_candles'] + trade_duration = self._current_tick - self._last_trade_tick + + factor = 1 + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float((np.log(current_price) - np.log(last_trade_price)) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) factor = 1 if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: - factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(np.log(last_trade_price) - np.log(current_price) * factor) + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) return 0. 
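[editor's note] As the abstract `calculate_reward()` above suggests, users are expected to tailor the reward in their own prediction-model file. A hypothetical user-side environment could be as small as the sketch below; the class name and reward values are illustrative, the imports follow the module layout added by this patch:

```python
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyCustomEnv(Base5ActionRLEnv):
    """Example override: only reward realised exits, ignore everything else."""

    def calculate_reward(self, action):
        # pay out the unrealized profit only at the moment a position is closed
        if action == Actions.Long_exit.value and self._position == Positions.Long:
            return float(self.get_unrealized_profit())
        if action == Actions.Short_exit.value and self._position == Positions.Short:
            return float(self.get_unrealized_profit())
        return 0.
```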
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index ee9a407c9..96d42ae66 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -62,12 +62,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, config=self.config) for i + self.reward_params, self.CONV_WIDTH, + config=self.config) for i in range(num_cpu)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i + self.reward_params, self.CONV_WIDTH, monitor=True, + config=self.config) for i in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, From b708134c1a1e6429216f2605f625e064d51da235 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 23 Aug 2022 15:06:57 +0200 Subject: [PATCH 053/232] switch multiproc thread count to rl_config definition --- .../freqai/prediction_models/ReinforcementLearner_multiproc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 96d42ae66..17281e2d0 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -60,7 +60,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # environments if not self.train_env: env_id = "train_env" - num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2) + num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, config=self.config) for i From c0cee5df07ac18d7f870385586e9007ccc74024b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 12:54:02 +0200 Subject: [PATCH 054/232] add continual retraining feature, handly mypy typing reqs, improve docstrings --- config_examples/config_freqai-rl.example.json | 3 +- freqtrade/freqai/RL/Base3ActionRLEnv.py | 618 +++++++++--------- freqtrade/freqai/RL/Base5ActionRLEnv.py | 38 +- .../RL/BaseReinforcementLearningModel.py | 42 +- .../ReinforcementLearnerCustomAgent.py | 10 +- freqtrade/freqai/data_drawer.py | 4 + .../ReinforcementLearningExample5ac.py | 3 +- .../prediction_models/BaseClassifierModel.py | 4 +- .../prediction_models/BaseRegressionModel.py | 4 +- .../prediction_models/BaseTensorFlowModel.py | 4 +- .../prediction_models/ReinforcementLearner.py | 19 +- 11 files changed, 387 insertions(+), 362 deletions(-) rename freqtrade/freqai/{prediction_models => RL}/ReinforcementLearnerCustomAgent.py (95%) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index bb67b44b6..b3f8737be 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -85,12 +85,13 @@ "verbose": 1 }, "rl_config": { - "train_cycles": 10, + "train_cycles": 3, "eval_cycles": 3, "thread_count": 4, "max_trade_duration_candles": 
100, "model_type": "PPO", "policy_type": "MlpPolicy", + "continual_retraining": true, "model_reward_parameters": { "rr": 1, "profit_aim": 0.02, diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index df53c729b..cddd2f6f9 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -1,330 +1,330 @@ -import logging -from enum import Enum +# import logging +# from enum import Enum -import gym -import numpy as np -import pandas as pd -from gym import spaces -from gym.utils import seeding -from pandas import DataFrame +# import gym +# import numpy as np +# import pandas as pd +# from gym import spaces +# from gym.utils import seeding +# from pandas import DataFrame -# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +# # from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union -logger = logging.getLogger(__name__) +# logger = logging.getLogger(__name__) -class Actions(Enum): - Short = 0 - Long = 1 - Neutral = 2 +# class Actions(Enum): +# Short = 0 +# Long = 1 +# Neutral = 2 -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 +# class Positions(Enum): +# Short = 0 +# Long = 1 +# Neutral = 0.5 - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long +# def opposite(self): +# return Positions.Short if self == Positions.Long else Positions.Long -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 +# def mean_over_std(x): +# std = np.std(x, ddof=1) +# mean = np.mean(x) +# return mean / std if std > 0 else 0 -class Base3ActionRLEnv(gym.Env): +# class Base3ActionRLEnv(gym.Env): - metadata = {'render.modes': ['human']} +# metadata = {'render.modes': ['human']} - def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), - reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1): - assert df.ndim == 2 +# def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), +# reward_kwargs: dict = {}, window_size=10, starting_point=True, +# id: str = 'baseenv-1', seed: int = 1): +# assert df.ndim == 2 - self.id = id - self.seed(seed) - self.reset_env(df, prices, window_size, reward_kwargs, starting_point) +# self.id = id +# self.seed(seed) +# self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] +# def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): +# self.df = df +# self.signal_features = self.df +# self.prices = prices +# self.window_size = window_size +# self.starting_point = starting_point +# self.rr = reward_kwargs["rr"] +# self.profit_aim = reward_kwargs["profit_aim"] - self.fee = 0.0015 +# self.fee = 0.0015 - # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 2) - self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None - self._position = Positions.Neutral - 
self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] - - def seed(self, seed: int = 1): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self._first_rendering = True - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - - return self._get_observation() - - def step(self, action: int): - self._done = False - self._current_tick += 1 - - if self._current_tick == self._end_tick: - self._done = True - - self.update_portfolio_log_returns(action) - - self._update_profit(action) - step_reward = self.calculate_reward(action) - self.total_reward += step_reward - - trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position - """ - Action: Neutral, position: Long -> Close Long - Action: Neutral, position: Short -> Close Short - - Action: Long, position: Neutral -> Open Long - Action: Long, position: Short -> Close Short and Open Long - - Action: Short, position: Neutral -> Open Short - Action: Short, position: Long -> Close Long and Open Short - """ - - if action == Actions.Neutral.value: - self._position = Positions.Neutral - trade_type = "neutral" - elif action == Actions.Long.value: - self._position = Positions.Long - trade_type = "long" - elif action == Actions.Short.value: - self._position = Positions.Short - trade_type = "short" - else: - print("case not defined") - - # Update last trade tick - self._last_trade_tick = self._current_tick - - if trade_type is not None: - self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type}) - - if self._total_profit < 0.2: - self._done = True - - self._position_history.append(self._position) - observation = self._get_observation() - info = dict( - tick=self._current_tick, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value - ) - self._update_history(info) - - return observation, step_reward, self._done, info - - def _get_observation(self): - features_window = self.signal_features[( - self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 2)), - columns=['current_profit_pct', 'position'], - index=features_window.index) - - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state - - def get_unrealized_profit(self): - - if self._last_trade_tick is None: - return 0. - - if self._position == Positions.Neutral: - return 0. 
- elif self._position == Positions.Short: - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. - - def is_tradesignal(self, action: int): - # trade signal - """ - not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short - """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) - or (action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def _is_trade(self, action: Actions): - return ((action == Actions.Long.value and self._position == Positions.Short) or - (action == Actions.Short.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) - ) - - def is_hold(self, action): - return ((action == Actions.Short.value and self._position == Positions.Short) - or (action == Actions.Long.value and self._position == Positions.Long)) - - def add_buy_fee(self, price): - return price * (1 + self.fee) - - def add_sell_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - def calculate_reward(self, action): - - if self._last_trade_tick is None: - return 0. - - # close long - if (action == Actions.Short.value or - action == Actions.Neutral.value) and self._position == Positions.Long: - last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(current_price) - np.log(last_trade_price)) - - # close short - if (action == Actions.Long.value or - action == Actions.Neutral.value) and self._position == Positions.Short: - last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) - current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) - return float(np.log(last_trade_price) - np.log(current_price)) - - return 0. - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit * pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit * pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action: int): - """ - We support Long, Neutral and Short positions. - Return is generated from rising prices in Long - and falling prices in Short positions. 
- The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Short.value or action == Actions.Neutral.value: - current_price = self.add_sell_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_buy_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - if action == Actions.Long.value or action == Actions.Neutral.value: - current_price = self.add_buy_fee(current_price) - - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_sell_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open +# # # spaces +# self.shape = (window_size, self.signal_features.shape[1] + 2) +# self.action_space = spaces.Discrete(len(Actions)) +# self.observation_space = spaces.Box( +# low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + +# # episode +# self._start_tick = self.window_size +# self._end_tick = len(self.prices) - 1 +# self._done = None +# self._current_tick = None +# self._last_trade_tick = None +# self._position = Positions.Neutral +# self._position_history = None +# self.total_reward = None +# self._total_profit = None +# self._first_rendering = None +# self.history = None +# self.trade_history = [] + +# def seed(self, seed: int = 1): +# self.np_random, seed = seeding.np_random(seed) +# return [seed] + +# def reset(self): + +# self._done = False + +# if self.starting_point is True: +# self._position_history = (self._start_tick * [None]) + [self._position] +# else: +# self._position_history = (self.window_size * [None]) + [self._position] + +# self._current_tick = self._start_tick +# self._last_trade_tick = None +# self._position = Positions.Neutral + +# self.total_reward = 0. +# self._total_profit = 1. 
# unit +# self._first_rendering = True +# self.history = {} +# self.trade_history = [] +# self.portfolio_log_returns = np.zeros(len(self.prices)) + +# self._profits = [(self._start_tick, 1)] +# self.close_trade_profit = [] + +# return self._get_observation() + +# def step(self, action: int): +# self._done = False +# self._current_tick += 1 + +# if self._current_tick == self._end_tick: +# self._done = True + +# self.update_portfolio_log_returns(action) + +# self._update_profit(action) +# step_reward = self.calculate_reward(action) +# self.total_reward += step_reward + +# trade_type = None +# if self.is_tradesignal(action): # exclude 3 case not trade +# # Update position +# """ +# Action: Neutral, position: Long -> Close Long +# Action: Neutral, position: Short -> Close Short + +# Action: Long, position: Neutral -> Open Long +# Action: Long, position: Short -> Close Short and Open Long + +# Action: Short, position: Neutral -> Open Short +# Action: Short, position: Long -> Close Long and Open Short +# """ + +# if action == Actions.Neutral.value: +# self._position = Positions.Neutral +# trade_type = "neutral" +# elif action == Actions.Long.value: +# self._position = Positions.Long +# trade_type = "long" +# elif action == Actions.Short.value: +# self._position = Positions.Short +# trade_type = "short" +# else: +# print("case not defined") + +# # Update last trade tick +# self._last_trade_tick = self._current_tick + +# if trade_type is not None: +# self.trade_history.append( +# {'price': self.current_price(), 'index': self._current_tick, +# 'type': trade_type}) + +# if self._total_profit < 0.2: +# self._done = True + +# self._position_history.append(self._position) +# observation = self._get_observation() +# info = dict( +# tick=self._current_tick, +# total_reward=self.total_reward, +# total_profit=self._total_profit, +# position=self._position.value +# ) +# self._update_history(info) + +# return observation, step_reward, self._done, info + +# def _get_observation(self): +# features_window = self.signal_features[( +# self._current_tick - self.window_size):self._current_tick] +# features_and_state = DataFrame(np.zeros((len(features_window), 2)), +# columns=['current_profit_pct', 'position'], +# index=features_window.index) + +# features_and_state['current_profit_pct'] = self.get_unrealized_profit() +# features_and_state['position'] = self._position.value +# features_and_state = pd.concat([features_window, features_and_state], axis=1) +# return features_and_state + +# def get_unrealized_profit(self): + +# if self._last_trade_tick is None: +# return 0. + +# if self._position == Positions.Neutral: +# return 0. +# elif self._position == Positions.Short: +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# return (last_trade_price - current_price) / last_trade_price +# elif self._position == Positions.Long: +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# return (current_price - last_trade_price) / last_trade_price +# else: +# return 0. 
+ +# def is_tradesignal(self, action: int): +# # trade signal +# """ +# not trade signal is : +# Action: Neutral, position: Neutral -> Nothing +# Action: Long, position: Long -> Hold Long +# Action: Short, position: Short -> Hold Short +# """ +# return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) +# or (action == Actions.Short.value and self._position == Positions.Short) +# or (action == Actions.Long.value and self._position == Positions.Long)) + +# def _is_trade(self, action: Actions): +# return ((action == Actions.Long.value and self._position == Positions.Short) or +# (action == Actions.Short.value and self._position == Positions.Long) or +# (action == Actions.Neutral.value and self._position == Positions.Long) or +# (action == Actions.Neutral.value and self._position == Positions.Short) +# ) + +# def is_hold(self, action): +# return ((action == Actions.Short.value and self._position == Positions.Short) +# or (action == Actions.Long.value and self._position == Positions.Long)) + +# def add_buy_fee(self, price): +# return price * (1 + self.fee) + +# def add_sell_fee(self, price): +# return price / (1 + self.fee) + +# def _update_history(self, info): +# if not self.history: +# self.history = {key: [] for key in info.keys()} + +# for key, value in info.items(): +# self.history[key].append(value) + +# def get_sharpe_ratio(self): +# return mean_over_std(self.get_portfolio_log_returns()) + +# def calculate_reward(self, action): + +# if self._last_trade_tick is None: +# return 0. + +# # close long +# if (action == Actions.Short.value or +# action == Actions.Neutral.value) and self._position == Positions.Long: +# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(current_price) - np.log(last_trade_price)) + +# # close short +# if (action == Actions.Long.value or +# action == Actions.Neutral.value) and self._position == Positions.Short: +# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) +# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) +# return float(np.log(last_trade_price) - np.log(current_price)) + +# return 0. + +# def _update_profit(self, action): +# if self._is_trade(action) or self._done: +# pnl = self.get_unrealized_profit() + +# if self._position == Positions.Long: +# self._total_profit = self._total_profit + self._total_profit * pnl +# self._profits.append((self._current_tick, self._total_profit)) +# self.close_trade_profit.append(pnl) + +# if self._position == Positions.Short: +# self._total_profit = self._total_profit + self._total_profit * pnl +# self._profits.append((self._current_tick, self._total_profit)) +# self.close_trade_profit.append(pnl) + +# def most_recent_return(self, action: int): +# """ +# We support Long, Neutral and Short positions. +# Return is generated from rising prices in Long +# and falling prices in Short positions. +# The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+# """ +# # Long positions +# if self._position == Positions.Long: +# current_price = self.prices.iloc[self._current_tick].open +# if action == Actions.Short.value or action == Actions.Neutral.value: +# current_price = self.add_sell_fee(current_price) + +# previous_price = self.prices.iloc[self._current_tick - 1].open + +# if (self._position_history[self._current_tick - 1] == Positions.Short +# or self._position_history[self._current_tick - 1] == Positions.Neutral): +# previous_price = self.add_buy_fee(previous_price) + +# return np.log(current_price) - np.log(previous_price) + +# # Short positions +# if self._position == Positions.Short: +# current_price = self.prices.iloc[self._current_tick].open +# if action == Actions.Long.value or action == Actions.Neutral.value: +# current_price = self.add_buy_fee(current_price) + +# previous_price = self.prices.iloc[self._current_tick - 1].open +# if (self._position_history[self._current_tick - 1] == Positions.Long +# or self._position_history[self._current_tick - 1] == Positions.Neutral): +# previous_price = self.add_sell_fee(previous_price) + +# return np.log(previous_price) - np.log(current_price) + +# return 0 + +# def get_portfolio_log_returns(self): +# return self.portfolio_log_returns[1:self._current_tick + 1] + +# def update_portfolio_log_returns(self, action): +# self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + +# def current_price(self) -> float: +# return self.prices.iloc[self._current_tick].open - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open +# def prev_price(self) -> float: +# return self.prices.iloc[self._current_tick - 1].open - def sharpe_ratio(self) -> float: - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward +# def sharpe_ratio(self) -> float: +# if len(self.close_trade_profit) == 0: +# return 0. +# returns = np.array(self.close_trade_profit) +# reward = (np.mean(returns) - 0. 
+ 1e-9) / (np.std(returns) + 1e-9) +# return reward diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index a14111495..64d7061fc 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -1,6 +1,6 @@ import logging from enum import Enum -# from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +from typing import Optional import gym import numpy as np @@ -44,14 +44,14 @@ class Base5ActionRLEnv(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - assert df.ndim == 2 self.rl_config = config['freqai']['rl_config'] self.id = id self.seed(seed) self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, + reward_kwargs: dict, starting_point=True): self.df = df self.signal_features = self.df self.prices = prices @@ -69,18 +69,18 @@ class Base5ActionRLEnv(gym.Env): low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) # episode - self._start_tick = self.window_size - self._end_tick = len(self.prices) - 1 - self._done = None - self._current_tick = None - self._last_trade_tick = None + self._start_tick: int = self.window_size + self._end_tick: int = len(self.prices) - 1 + self._done: bool = False + self._current_tick: int = self._start_tick + self._last_trade_tick: Optional[int] = None self._position = Positions.Neutral - self._position_history = None - self.total_reward = None - self._total_profit = None - self._first_rendering = None - self.history = None - self.trade_history = [] + self._position_history: list = [None] + self.total_reward: float = 0 + self._total_profit: float = 0 + self._first_rendering: bool = False + self.history: dict = {} + self.trade_history: list = [] def seed(self, seed: int = 1): self.np_random, seed = seeding.np_random(seed) @@ -125,8 +125,7 @@ class Base5ActionRLEnv(gym.Env): self.total_reward += step_reward trade_type = None - if self.is_tradesignal(action): # exclude 3 case not trade - # Update position + if self.is_tradesignal(action): """ Action: Neutral, position: Long -> Close Long Action: Neutral, position: Short -> Close Short @@ -223,9 +222,8 @@ class Base5ActionRLEnv(gym.Env): # trade signal """ not trade signal is : - Action: Neutral, position: Neutral -> Nothing - Action: Long, position: Long -> Hold Long - Action: Short, position: Short -> Hold Short + Determine if the signal is non sensical + e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Neutral.value and self._position == Positions.Short) or @@ -292,7 +290,7 @@ class Base5ActionRLEnv(gym.Env): def most_recent_return(self, action: int): """ - We support Long, Neutral and Short positions. + Calculate the tick to tick return if in a trade. Return is generated from rising prices in Long and falling prices in Short positions. The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
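[editor's note] To make the observation layout of this environment concrete, here is a standalone pandas/numpy sketch (toy numbers, column names as used by the environment) of what `_get_observation()` hands the agent each step: the feature window with the three state columns appended, matching the `(window_size, n_features + 3)` shape declared earlier in the patch:

```python
import numpy as np
import pandas as pd

window_size, n_features = 4, 2
signal_features = pd.DataFrame(np.random.rand(20, n_features),
                               columns=["%-feat_a", "%-feat_b"])
current_tick = 10

features_window = signal_features[current_tick - window_size:current_tick]
state = pd.DataFrame(np.zeros((len(features_window), 3)),
                     columns=["current_profit_pct", "position", "trade_duration"],
                     index=features_window.index)
state["current_profit_pct"] = 0.012   # unrealized profit of the open trade
state["position"] = 1.0               # e.g. a long position
state["trade_duration"] = 6           # candles since the trade was opened
observation = pd.concat([features_window, state], axis=1)

print(observation.shape)              # (4, 5) == (window_size, n_features + 3)
```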
diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a9a1377a8..6660709bd 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -19,6 +19,7 @@ from typing import Callable from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym +from pathlib import Path logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -40,6 +41,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_env: Base5ActionRLEnv = None self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] + self.rl_config = self.freqai_info['rl_config'] + self.continual_retraining = self.rl_config['continual_retraining'] if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -68,7 +71,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): logger.info("--------------------Starting training " f"{pair} --------------------") - # filter the features requested by user in the configuration file and elegantly handle NaNs features_filtered, labels_filtered = dk.filter_features( unfiltered_dataframe, dk.training_features_list, @@ -78,19 +80,19 @@ class BaseReinforcementLearningModel(IFreqaiModel): data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( features_filtered, labels_filtered) - dk.fit_labels() # useless for now, but just satiating append methods + dk.fit_labels() # FIXME useless for now, but just satiating append methods # normalize all data based on train_dataset only prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk) data_dictionary = dk.normalize_data(data_dictionary) - # optional additional data cleaning/analysis + # data cleaning/analysis self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk) @@ -100,9 +102,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): + def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame], + prices_train: DataFrame, prices_test: DataFrame, + dk: FreqaiDataKitchen): """ - User overrides this as shown here if they are using a custom MyRLEnv + User can override this if they are using a custom MyRLEnv """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -114,18 +118,22 @@ class BaseReinforcementLearningModel(IFreqaiModel): reward_kwargs=self.reward_params, config=self.config) self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config), ".") + reward_kwargs=self.reward_params, config=self.config), + str(Path(dk.data_path / 'monitor'))) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) + best_model_save_path=str(dk.data_path)) else: self.train_env.reset() 
self.eval_env.reset() self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) self.eval_env.reset_env(test_df, prices_test, self.CONV_WIDTH, self.reward_params) + # self.eval_callback.eval_env = self.eval_env + # self.eval_callback.best_model_save_path = str(dk.data_path) + # self.eval_callback._init_callback() self.eval_callback.__init__(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) + best_model_save_path=str(dk.data_path)) @abstractmethod def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -137,19 +145,20 @@ class BaseReinforcementLearningModel(IFreqaiModel): return - def get_state_info(self, pair): + def get_state_info(self, pair: str): open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 - current_profit = 0 + current_profit: float = 0 trade_duration = 0 for trade in open_trades: if trade.pair == pair: + # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) current_value = self.strategy.dp._exchange.get_rate( pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() - trade_duration = (now - trade.open_date.timestamp()) / self.base_tf_seconds - if 'long' in trade.enter_tag: + trade_duration = int((now - trade.open_date.timestamp()) / self.base_tf_seconds) + if 'long' in str(trade.enter_tag): market_side = 1 current_profit = (current_value - openrate) / openrate else: @@ -245,8 +254,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -def make_env(env_id: str, rank: int, seed: int, train_df, price, - reward_params, window_size, monitor=False, config={}) -> Callable: +def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, + reward_params: Dict[str, int], window_size: int, monitor: bool = False, + config: Dict[str, Any] = {}) -> Callable: """ Utility function for multiprocessed env. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py similarity index 95% rename from freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py rename to freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py index bb16b612b..fcd813ce6 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearnerCustomAgent.py +++ b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py @@ -22,6 +22,12 @@ class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): """ User can customize agent by defining the class and using it directly. Here the example is "TDQN" + + Warning! + This is an advanced example of how a user may create and use a highly + customized model class (which can inherit from existing classes, + similar to how the example below inherits from DQN). + This file is for example purposes only, and should not be run. 
""" def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -34,7 +40,7 @@ class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): # TDQN is a custom agent defined below model = TDQN(self.policy_type, self.train_env, - tensorboard_log=Path(dk.data_path / "tensorboard"), + tensorboard_log=str(Path(dk.data_path / "tensorboard")), policy_kwargs=policy_kwargs, **self.freqai_info['model_training_parameters'] ) @@ -217,7 +223,7 @@ class TDQN(DQN): exploration_initial_eps: float = 1.0, exploration_final_eps: float = 0.05, max_grad_norm: float = 10, - tensorboard_log: Optional[Path] = None, + tensorboard_log: Optional[str] = None, create_eval_env: bool = False, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 1, diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index c37973551..ae3e92f5e 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -485,6 +485,10 @@ class FreqaiDataDrawer: f"Unable to load model, ensure model exists at " f"{dk.data_path} " ) + # load it into ram if it was loaded from disk + if coin not in self.model_dictionary: + self.model_dictionary[coin] = model + if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: dk.pca = cloudpickle.load( open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb") diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 437b53b05..15a263b94 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -76,7 +76,8 @@ class ReinforcementLearningExample5ac(IStrategy): informative[f"%-{coin}pct-change"] = informative["close"].pct_change() informative[f"%-{coin}raw_volume"] = informative["volume"] - # The following features are necessary for RL models + # FIXME: add these outside the user strategy? + # The following columns are necessary for RL models. 
informative[f"%-{coin}raw_close"] = informative["close"] informative[f"%-{coin}raw_open"] = informative["open"] informative[f"%-{coin}raw_high"] = informative["high"] diff --git a/freqtrade/freqai/prediction_models/BaseClassifierModel.py b/freqtrade/freqai/prediction_models/BaseClassifierModel.py index 2edbf3b51..042f43199 100644 --- a/freqtrade/freqai/prediction_models/BaseClassifierModel.py +++ b/freqtrade/freqai/prediction_models/BaseClassifierModel.py @@ -57,9 +57,9 @@ class BaseClassifierModel(IFreqaiModel): self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') model = self.fit(data_dictionary) diff --git a/freqtrade/freqai/prediction_models/BaseRegressionModel.py b/freqtrade/freqai/prediction_models/BaseRegressionModel.py index 2ef175a2e..6ca9ae8cb 100644 --- a/freqtrade/freqai/prediction_models/BaseRegressionModel.py +++ b/freqtrade/freqai/prediction_models/BaseRegressionModel.py @@ -56,9 +56,9 @@ class BaseRegressionModel(IFreqaiModel): self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') model = self.fit(data_dictionary) diff --git a/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py b/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py index 04eff045f..6a842f007 100644 --- a/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py +++ b/freqtrade/freqai/prediction_models/BaseTensorFlowModel.py @@ -53,9 +53,9 @@ class BaseTensorFlowModel(IFreqaiModel): self.data_cleaning_train(dk) logger.info( - f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features" + f'Training model on {len(dk.data_dictionary["train_features"].columns)}' + f' features and {len(data_dictionary["train_features"])} data points' ) - logger.info(f'Training model on {len(data_dictionary["train_features"])} data points') model = self.fit(data_dictionary) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index d3e6bde7c..254fd32b0 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -1,7 +1,6 @@ import logging -from typing import Any, Dict # , Tuple +from typing import Any, Dict -# import numpy.typing as npt import torch as th from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions @@ -22,12 +21,18 @@ class ReinforcementLearner(BaseReinforcementLearningModel): total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) + net_arch=[512, 512, 256]) - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.data_path / "tensorboard"), - **self.freqai_info['model_training_parameters'] - ) + if dk.pair not in self.dd.model_dictionary or not 
self.continual_retraining: + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.data_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + else: + logger.info('Continual training activated - starting training from previously ' + 'trained agent.') + model = self.dd.model_dictionary[dk.pair] + model.set_env(self.train_env) model.learn( total_timesteps=int(total_timesteps), From bd870e233128d655ac89a091c1aa6a8b2196c0d7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 16:32:14 +0200 Subject: [PATCH 055/232] fix monitor bug, set default values in case user doesnt set params --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++-- freqtrade/freqai/prediction_models/ReinforcementLearner.py | 3 ++- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 6660709bd..1bc3505e1 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -42,7 +42,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] - self.continual_retraining = self.rl_config['continual_retraining'] + self.continual_retraining = self.rl_config.get('continual_retraining', False) if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -289,7 +289,7 @@ class MyRLEnv(Base5ActionRLEnv): return 0. pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config['max_trade_duration_candles'] + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) trade_duration = self._current_tick - self._last_trade_tick factor = 1 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 254fd32b0..f7f016ab4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -32,6 +32,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): logger.info('Continual training activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] + model.tensorboard_log = Path(dk.data_path / "tensorboard") model.set_env(self.train_env) model.learn( @@ -61,7 +62,7 @@ class MyRLEnv(Base5ActionRLEnv): return 0. 
pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config['max_trade_duration_candles'] + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) trade_duration = self._current_tick - self._last_trade_tick factor = 1 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 17281e2d0..3a4c245aa 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -26,10 +26,10 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 512]) + net_arch=[512, 512, 256]) model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.data_path / "tensorboard"), + tensorboard_log=Path(dk.full_path / "tensorboard"), **self.freqai_info['model_training_parameters'] ) From a61821e1c6803ca82951e3e03df9fcb8cecbcc99 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 16:33:13 +0200 Subject: [PATCH 056/232] remove monitor log --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 1bc3505e1..0f0120365 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -118,8 +118,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): reward_kwargs=self.reward_params, config=self.config) self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config), - str(Path(dk.data_path / 'monitor'))) + reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=eval_freq, best_model_save_path=str(dk.data_path)) From d1bee29b1e5b01eb3465deea1b64968660e42b82 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 24 Aug 2022 18:32:40 +0200 Subject: [PATCH 057/232] improve default reward, fix bugs in environment --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 40 ++++++++---- .../RL/BaseReinforcementLearningModel.py | 61 +++++++++---------- .../prediction_models/ReinforcementLearner.py | 54 +++++++++++++--- 3 files changed, 102 insertions(+), 53 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 64d7061fc..9f7c52c9c 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -140,30 +140,32 @@ class Base5ActionRLEnv(gym.Env): if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" + self._last_trade_tick = None elif action == Actions.Long_enter.value: self._position = Positions.Long trade_type = "long" + self._last_trade_tick = self._current_tick elif action == Actions.Short_enter.value: self._position = Positions.Short trade_type = "short" + self._last_trade_tick = self._current_tick elif action == Actions.Long_exit.value: self._position = Positions.Neutral trade_type = "neutral" + self._last_trade_tick = None elif action == Actions.Short_exit.value: self._position = Positions.Neutral trade_type = "neutral" + self._last_trade_tick = None else: print("case not defined") - # Update last trade 
tick - self._last_trade_tick = self._current_tick - if trade_type is not None: self.trade_history.append( {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - if self._total_profit < 0.2: + if self._total_profit < 0.5: self._done = True self._position_history.append(self._position) @@ -221,8 +223,7 @@ class Base5ActionRLEnv(gym.Env): def is_tradesignal(self, action: int): # trade signal """ - not trade signal is : - Determine if the signal is non sensical + Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or @@ -237,6 +238,24 @@ class Base5ActionRLEnv(gym.Env): (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) + def _is_valid(self, action: int): + # trade signal + """ + Determine if the signal is valid. + e.g.: agent wants a Actions.Long_exit while it is in a Positions.short + """ + # Agent should only try to exit if it is in position + if action in (Actions.Short_exit.value, Actions.Long_exit.value): + if self._position not in (Positions.Short, Positions.Long): + return False + + # Agent should only try to enter if it is not in position + if action in (Actions.Short_enter.value, Actions.Long_enter.value): + if self._position != Positions.Neutral: + return False + + return True + def _is_trade(self, action: Actions): return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or (action == Actions.Short_enter.value and self._position == Positions.Neutral)) @@ -278,13 +297,8 @@ class Base5ActionRLEnv(gym.Env): if self._is_trade(action) or self._done: pnl = self.get_unrealized_profit() - if self._position == Positions.Long: - self._total_profit = self._total_profit + self._total_profit * pnl - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - if self._position == Positions.Short: - self._total_profit = self._total_profit + self._total_profit * pnl + if self._position in (Positions.Long, Positions.Short): + self._total_profit *= (1 + pnl) self._profits.append((self._current_tick, self._total_profit)) self.close_trade_profit.append(pnl) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 0f0120365..84d19f269 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -19,7 +19,6 @@ from typing import Callable from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym -from pathlib import Path logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -112,27 +111,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - # environments - if not self.train_env: - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config)) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - 
best_model_save_path=str(dk.data_path)) - else: - self.train_env.reset() - self.eval_env.reset() - self.train_env.reset_env(train_df, prices_train, self.CONV_WIDTH, self.reward_params) - self.eval_env.reset_env(test_df, prices_test, self.CONV_WIDTH, self.reward_params) - # self.eval_callback.eval_env = self.eval_env - # self.eval_callback.best_model_save_path = str(dk.data_path) - # self.eval_callback._init_callback() - self.eval_callback.__init__(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=str(dk.data_path)) + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config)) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=str(dk.data_path)) @abstractmethod def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): @@ -284,30 +270,43 @@ class MyRLEnv(Base5ActionRLEnv): def calculate_reward(self, action): - if self._last_trade_tick is None: - return 0. + # first, penalize if the action is not valid + if not self._is_valid(action): + return -15 pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value): + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -15 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick - factor = 1 if trade_duration <= max_trade_duration: factor *= 1.5 elif trade_duration > max_trade_duration: factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long): + return -50 * trade_duration / max_trade_duration + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) return 0. 
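[editor's note] The -15 penalty in the reward above hinges on `_is_valid()`. A standalone restatement of that validity rule (local stand-in enums, member values illustrative) shows the intent: exits are only legal from an open position, entries only from neutral:

```python
from enum import Enum


class Actions(Enum):           # stand-in for the patch's 5-action enum
    Neutral = 0
    Long_enter = 1
    Long_exit = 2
    Short_enter = 3
    Short_exit = 4


class Positions(Enum):         # stand-in for the patch's position enum
    Short = 0
    Long = 1
    Neutral = 0.5


def is_valid(action: int, position: Positions) -> bool:
    if action in (Actions.Short_exit.value, Actions.Long_exit.value):
        return position in (Positions.Short, Positions.Long)
    if action in (Actions.Short_enter.value, Actions.Long_enter.value):
        return position == Positions.Neutral
    return True


print(is_valid(Actions.Long_exit.value, Positions.Short))     # False -> penalised with -15
print(is_valid(Actions.Long_enter.value, Positions.Neutral))  # True
```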
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index f7f016ab4..2d1cafab5 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -6,6 +6,10 @@ from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from pathlib import Path +from pandas import DataFrame +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +import numpy as np logger = logging.getLogger(__name__) @@ -49,6 +53,25 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return model + def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame], + prices_train: DataFrame, prices_test: DataFrame, + dk: FreqaiDataKitchen): + """ + User can override this if they are using a custom MyRLEnv + """ + train_df = data_dictionary["train_features"] + test_df = data_dictionary["test_features"] + eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) + + self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config) + self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config)) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=str(dk.data_path)) + class MyRLEnv(Base5ActionRLEnv): """ @@ -58,30 +81,43 @@ class MyRLEnv(Base5ActionRLEnv): def calculate_reward(self, action): - if self._last_trade_tick is None: - return 0. + # first, penalize if the action is not valid + if not self._is_valid(action): + return -15 pnl = self.get_unrealized_profit() - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 100) + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value): + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -15 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick - factor = 1 if trade_duration <= max_trade_duration: factor *= 1.5 elif trade_duration > max_trade_duration: factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long): + return -50 * trade_duration / max_trade_duration + # close long if action == Actions.Long_exit.value and self._position == Positions.Long: - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: - factor = 1 - if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr: + if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) + return float(rew * factor) return 0. 
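
The train/eval environment wiring above is the standard stable-baselines3 evaluation pattern: EvalCallback periodically runs the current policy on the monitored eval_env and writes the best checkpoint (best_model.zip) into best_model_save_path, which is why dk.data_path is passed in, and eval_cycles * len(test_df) simply expresses eval_freq in units of passes over the test set. A simplified sketch of that pattern, with a toy gym environment standing in for MyRLEnv (illustration only, not part of the patch):

import gym
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.monitor import Monitor

# toy stand-ins; in the patch these are MyRLEnv instances built from freqai dataframes
train_env = gym.make("CartPole-v1")
eval_env = Monitor(gym.make("CartPole-v1"))

# evaluates every eval_freq environment steps and keeps the best checkpoint on disk
eval_callback = EvalCallback(eval_env, deterministic=True, render=False,
                             eval_freq=1_000, best_model_save_path="/tmp/best_model")

model = PPO("MlpPolicy", train_env)
model.learn(total_timesteps=10_000, callback=eval_callback)
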
From 94cfc8e63febe0590bae324f932cde390fc3a7a2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 25 Aug 2022 11:46:18 +0200 Subject: [PATCH 058/232] fix multiproc callback, add continual learning to multiproc, fix totalprofit bug in env, set eval_freq automatically, improve default reward --- config_examples/config_freqai-rl.example.json | 14 ++--- freqtrade/freqai/RL/Base3ActionRLEnv.py | 2 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 7 +-- .../RL/BaseReinforcementLearningModel.py | 24 ++++---- .../prediction_models/ReinforcementLearner.py | 16 +++--- .../ReinforcementLearner_multiproc.py | 57 +++++++++---------- 6 files changed, 58 insertions(+), 62 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index b3f8737be..e8852a0cf 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -56,9 +56,9 @@ "freqai": { "enabled": true, "model_save_type": "stable_baselines", - "conv_width": 10, + "conv_width": 4, "purge_old_models": true, - "train_period_days": 10, + "train_period_days": 5, "backtest_period_days": 2, "identifier": "unique-id", "data_kitchen_thread_count": 2, @@ -72,7 +72,7 @@ "30m" ], "indicator_max_period_candles": 10, - "indicator_periods_candles": [5, 10] + "indicator_periods_candles": [5] }, "data_split_parameters": { "test_size": 0.5, @@ -85,13 +85,13 @@ "verbose": 1 }, "rl_config": { - "train_cycles": 3, - "eval_cycles": 3, + "train_cycles": 6, "thread_count": 4, - "max_trade_duration_candles": 100, + "max_trade_duration_candles": 300, "model_type": "PPO", "policy_type": "MlpPolicy", - "continual_retraining": true, + "continual_learning": false, + "max_training_drawdown_pct": 0.5, "model_reward_parameters": { "rr": 1, "profit_aim": 0.02, diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index cddd2f6f9..fe51d3b13 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -1,3 +1,5 @@ +# Example of a 3 action environment. + # import logging # from enum import Enum diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 9f7c52c9c..b93d6e6ff 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -77,8 +77,7 @@ class Base5ActionRLEnv(gym.Env): self._position = Positions.Neutral self._position_history: list = [None] self.total_reward: float = 0 - self._total_profit: float = 0 - self._first_rendering: bool = False + self._total_profit: float = 1 self.history: dict = {} self.trade_history: list = [] @@ -101,7 +100,6 @@ class Base5ActionRLEnv(gym.Env): self.total_reward = 0. self._total_profit = 1. # unit - self._first_rendering = True self.history = {} self.trade_history = [] self.portfolio_log_returns = np.zeros(len(self.prices)) @@ -165,7 +163,7 @@ class Base5ActionRLEnv(gym.Env): {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - if self._total_profit < 0.5: + if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8): self._done = True self._position_history.append(self._position) @@ -293,7 +291,6 @@ class Base5ActionRLEnv(gym.Env): return 0. 
def _update_profit(self, action): - # if self._is_trade(action) or self._done: if self._is_trade(action) or self._done: pnl = self.get_unrealized_profit() diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 84d19f269..7a524ba87 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -23,8 +23,8 @@ logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') -SB3_MODELS = ['PPO', 'A2C', 'DQN', 'TD3', 'SAC'] -SB3_CONTRIB_MODELS = ['TRPO', 'ARS'] +SB3_MODELS = ['PPO', 'A2C', 'DQN'] +SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO'] class BaseReinforcementLearningModel(IFreqaiModel): @@ -41,7 +41,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] - self.continual_retraining = self.rl_config.get('continual_retraining', False) + self.continual_learning = self.rl_config.get('continual_learning', False) if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -109,7 +109,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config) @@ -117,7 +116,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, + render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @abstractmethod @@ -138,6 +137,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): for trade in open_trades: if trade.pair == pair: # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) + # FIXME: get_rate and trade_udration shouldn't work with backtesting, + # we need to use candle dates and prices to compute that. 
current_value = self.strategy.dp._exchange.get_rate( pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate @@ -256,7 +257,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: Data env = MyRLEnv(df=train_df, prices=price, window_size=window_size, reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config) if monitor: - env = Monitor(env, ".") + env = Monitor(env) return env set_random_seed(seed) return _init @@ -272,18 +273,19 @@ class MyRLEnv(Base5ActionRLEnv): # first, penalize if the action is not valid if not self._is_valid(action): - return -15 + return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) factor = 100 # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value): + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -15 + return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick @@ -294,8 +296,8 @@ class MyRLEnv(Base5ActionRLEnv): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long): - return -50 * trade_duration / max_trade_duration + if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2d1cafab5..36cc821e4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -27,7 +27,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[512, 512, 256]) - if dk.pair not in self.dd.model_dictionary or not self.continual_retraining: + if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, tensorboard_log=Path(dk.data_path / "tensorboard"), **self.freqai_info['model_training_parameters'] @@ -61,7 +61,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config) @@ -69,7 +68,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, + render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -83,18 +82,19 @@ class MyRLEnv(Base5ActionRLEnv): # first, penalize if the action is not valid if not self._is_valid(action): - return -15 + return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) factor = 100 # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value): + if action in 
(Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -15 + return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick @@ -105,8 +105,8 @@ class MyRLEnv(Base5ActionRLEnv): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long): - return -50 * trade_duration / max_trade_duration + if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 3a4c245aa..7e8141b23 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -26,12 +26,19 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 256]) + net_arch=[256, 256]) - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.full_path / "tensorboard"), - **self.freqai_info['model_training_parameters'] - ) + if dk.pair not in self.dd.model_dictionary or not self.continual_learning: + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path(dk.full_path / "tensorboard"), + **self.freqai_info['model_training_parameters'] + ) + else: + logger.info('Continual training activated - starting training from previously ' + 'trained agent.') + model = self.dd.model_dictionary[dk.pair] + model.tensorboard_log = Path(dk.data_path / "tensorboard") + model.set_env(self.train_env) model.learn( total_timesteps=int(total_timesteps), @@ -57,30 +64,18 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) - # environments - if not self.train_env: - env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, - config=self.config) for i - in range(num_cpu)]) + env_id = "train_env" + num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) + self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.reward_params, self.CONV_WIDTH, + config=self.config) for i + in range(num_cpu)]) - eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config) for i - in range(num_cpu)]) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) - else: - self.train_env.env_method('reset') - self.eval_env.env_method('reset') - self.train_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - self.eval_env.env_method('reset_env', train_df, prices_train, - self.CONV_WIDTH, self.reward_params) - 
self.eval_callback.__init__(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=dk.data_path) + eval_env_id = 'eval_env' + self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.reward_params, self.CONV_WIDTH, monitor=True, + config=self.config) for i + in range(num_cpu)]) + self.eval_callback = EvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=dk.data_path) From 05ccebf9a16ac2059f7f0bbdde7f4f1e4bd0bcb4 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 25 Aug 2022 12:29:48 +0200 Subject: [PATCH 059/232] automate eval freq in multiproc --- .../freqai/prediction_models/ReinforcementLearner_multiproc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 7e8141b23..18a843b6d 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -62,7 +62,6 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df) env_id = "train_env" num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) @@ -77,5 +76,5 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): config=self.config) for i in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, + render=False, eval_freq=len(train_df), best_model_save_path=dk.data_path) From 3199eb453b2a855ae4949fefe586583e941b3235 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 25 Aug 2022 19:05:51 +0200 Subject: [PATCH 060/232] reduce code for base use-case, ensure multiproc inherits custom env, add ability to limit ram use. 
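
With MyRLEnv now nested inside the model class and make_env() receiving self.MyRLEnv, a user
environment defined on a subclass is picked up by both the single-process and the multiprocess
learners. Rough sketch of the intended override pattern (MyCoolRLModel is a hypothetical user
class, shown only for illustration):

from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner

class MyCoolRLModel(ReinforcementLearner):

    class MyRLEnv(ReinforcementLearner.MyRLEnv):

        def calculate_reward(self, action):
            # custom reward goes here; self.MyRLEnv resolves to this class in both
            # ReinforcementLearner and ReinforcementLearner_multiproc
            return 0.
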
--- config_examples/config_freqai-rl.example.json | 1 + .../RL/BaseReinforcementLearningModel.py | 129 ++++++++++-------- freqtrade/freqai/data_drawer.py | 9 +- .../prediction_models/ReinforcementLearner.py | 102 ++++++-------- .../ReinforcementLearner_multiproc.py | 7 +- 5 files changed, 125 insertions(+), 123 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index e8852a0cf..dc7c62e4a 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -58,6 +58,7 @@ "model_save_type": "stable_baselines", "conv_width": 4, "purge_old_models": true, + "limit_ram_usage": false, "train_period_days": 5, "backtest_period_days": 2, "identifier": "unique-id", diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 7a524ba87..5a7ae4372 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -19,6 +19,7 @@ from typing import Callable from datetime import datetime, timezone from stable_baselines3.common.utils import set_random_seed import gym +from pathlib import Path logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -110,9 +111,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, + self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, config=self.config) + self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, @@ -126,7 +127,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): go in here. Abstract method, so this function must be overridden by user class. """ - return def get_state_info(self, pair: str): @@ -232,6 +232,72 @@ class BaseReinforcementLearningModel(IFreqaiModel): return prices_train, prices_test + def load_model_from_disk(self, dk: FreqaiDataKitchen) -> Any: + """ + Can be used by user if they are trying to limit_ram_usage *and* + perform continual learning. + For now, this is unused. + """ + exists = Path(dk.data_path / f"{dk.model_filename}_model").is_file() + if exists: + model = self.MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") + else: + logger.info('No model file on disk to continue learning from.') + + return model + + # Nested class which can be overridden by user to customize further + class MyRLEnv(Base5ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. 
+ """ + + def calculate_reward(self, action): + + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + return 0. + # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor # all the other existing fit() functions to include dk argument. For now we instantiate and @@ -240,7 +306,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, +def make_env(MyRLEnv: Base5ActionRLEnv, env_id: str, rank: int, + seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, config: Dict[str, Any] = {}) -> Callable: """ @@ -252,6 +319,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: Data :param rank: (int) index of the subprocess :return: (Callable) """ + def _init() -> gym.Env: env = MyRLEnv(df=train_df, prices=price, window_size=window_size, @@ -261,54 +329,3 @@ def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: Data return env set_random_seed(seed) return _init - - -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - sets a custom reward based on profit and trade duration. 
- """ - - def calculate_reward(self, action): - - # first, penalize if the action is not valid - if not self._is_valid(action): - return -2 - - pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) - factor = 100 - - # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: - return 25 - # discourage agent from not entering trades - if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -1 - - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick - - if trade_duration <= max_trade_duration: - factor *= 1.5 - elif trade_duration > max_trade_duration: - factor *= 0.5 - - # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: - return -1 * trade_duration / max_trade_duration - - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - return 0. diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index ae3e92f5e..64a5502ad 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -90,6 +90,7 @@ class FreqaiDataDrawer: self.empty_pair_dict: pair_info = { "model_filename": "", "trained_timestamp": 0, "priority": 1, "first": True, "data_path": "", "extras": {}} + self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) def load_drawer_from_disk(self): """ @@ -423,8 +424,8 @@ class FreqaiDataDrawer: dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb") ) - # if self.live: - self.model_dictionary[coin] = model + if not self.limit_ram_use: + self.model_dictionary[coin] = model self.pair_dict[coin]["model_filename"] = dk.model_filename self.pair_dict[coin]["data_path"] = str(dk.data_path) self.save_drawer_to_disk() @@ -464,7 +465,7 @@ class FreqaiDataDrawer: model_type = self.freqai_info.get('model_save_type', 'joblib') # try to access model in memory instead of loading object from disk to save time - if dk.live and coin in self.model_dictionary: + if dk.live and coin in self.model_dictionary and not self.limit_ram_use: model = self.model_dictionary[coin] elif model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") @@ -486,7 +487,7 @@ class FreqaiDataDrawer: ) # load it into ram if it was loaded from disk - if coin not in self.model_dictionary: + if coin not in self.model_dictionary and not self.limit_ram_use: self.model_dictionary[coin] = model if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 36cc821e4..a72a56e20 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -3,12 +3,12 @@ from typing import Any, Dict import torch as th from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from 
freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Positions from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel from pathlib import Path -from pandas import DataFrame -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor +# from pandas import DataFrame +# from stable_baselines3.common.callbacks import EvalCallback +# from stable_baselines3.common.monitor import Monitor import numpy as np logger = logging.getLogger(__name__) @@ -53,71 +53,53 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return model - def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame], - prices_train: DataFrame, prices_test: DataFrame, - dk: FreqaiDataKitchen): + class MyRLEnv(BaseReinforcementLearningModel.MyRLEnv): """ - User can override this if they are using a custom MyRLEnv + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. """ - train_df = data_dictionary["train_features"] - test_df = data_dictionary["test_features"] - self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config)) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=len(train_df), - best_model_save_path=str(dk.data_path)) + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 -class MyRLEnv(Base5ActionRLEnv): - """ - User can override any function in BaseRLEnv and gym.Env. Here the user - sets a custom reward based on profit and trade duration. 
- """ + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 - def calculate_reward(self, action): + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 - # first, penalize if the action is not valid - if not self._is_valid(action): - return -2 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick - pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) - factor = 100 + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 - # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: - return 25 - # discourage agent from not entering trades - if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -1 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) - if trade_duration <= max_trade_duration: - factor *= 1.5 - elif trade_duration > max_trade_duration: - factor *= 0.5 + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) - # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value: - return -1 * trade_duration / max_trade_duration - - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) - - return 0. + return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 18a843b6d..f301da981 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -34,7 +34,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): **self.freqai_info['model_training_parameters'] ) else: - logger.info('Continual training activated - starting training from previously ' + logger.info('Continual learning activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] model.tensorboard_log = Path(dk.data_path / "tensorboard") @@ -65,13 +65,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): env_id = "train_env" num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) - self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train, + self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, config=self.config) for i in range(num_cpu)]) eval_env_id = 'eval_env' - self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test, + self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, + test_df, prices_test, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i in range(num_cpu)]) From d31926efdf9d3eafea87aa3b334de4d389c7308f Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Thu, 25 Aug 2022 21:40:16 +0200 Subject: [PATCH 061/232] Added Base4Action --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 346 ++++++++++++++++++++++++ 1 file changed, 346 insertions(+) create mode 100644 freqtrade/freqai/RL/Base4ActionRLEnv.py diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py new file mode 100644 index 000000000..478507639 --- /dev/null +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -0,0 +1,346 @@ +import logging +from enum import Enum +from typing import Optional + +import gym +import numpy as np +from gym import spaces +from gym.utils import seeding +from pandas import DataFrame +import pandas as pd +from abc import abstractmethod +logger = logging.getLogger(__name__) + + +class Actions(Enum): + Neutral = 0 + Exit = 1 + Long_enter = 2 + Short_enter = 3 + + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +def mean_over_std(x): + std = np.std(x, ddof=1) + mean = np.mean(x) + return mean / std if std > 0 else 0 + + +class Base4ActionRLEnv(gym.Env): + """ + Base class for a 5 action environment + """ + metadata = {'render.modes': ['human']} + + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1, config: dict = {}): + + self.rl_config = config['freqai']['rl_config'] + self.id = id + self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, + reward_kwargs: dict, starting_point=True): + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + 
self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1] + 3) + self.action_space = spaces.Discrete(len(Actions)) + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick: int = self.window_size + self._end_tick: int = len(self.prices) - 1 + self._done: bool = False + self._current_tick: int = self._start_tick + self._last_trade_tick: Optional[int] = None + self._position = Positions.Neutral + self._position_history: list = [None] + self.total_reward: float = 0 + self._total_profit: float = 1 + self.history: dict = {} + self.trade_history: list = [] + + def seed(self, seed: int = 1): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + + return self._get_observation() + + def step(self, action: int): + self._done = False + self._current_tick += 1 + + if self._current_tick == self._end_tick: + self._done = True + + self.update_portfolio_log_returns(action) + + self._update_profit(action) + step_reward = self.calculate_reward(action) + self.total_reward += step_reward + + trade_type = None + if self.is_tradesignal(action): + """ + Action: Neutral, position: Long -> Close Long + Action: Neutral, position: Short -> Close Short + + Action: Long, position: Neutral -> Open Long + Action: Long, position: Short -> Close Short and Open Long + + Action: Short, position: Neutral -> Open Short + Action: Short, position: Long -> Close Long and Open Short + """ + + if action == Actions.Neutral.value: + self._position = Positions.Neutral + trade_type = "neutral" + self._last_trade_tick = None + elif action == Actions.Long_enter.value: + self._position = Positions.Long + trade_type = "long" + self._last_trade_tick = self._current_tick + elif action == Actions.Short_enter.value: + self._position = Positions.Short + trade_type = "short" + self._last_trade_tick = self._current_tick + elif action == Actions.Exit.value: + self._position = Positions.Neutral + trade_type = "neutral" + self._last_trade_tick = None + elif action == Actions.Exit.value: + self._position = Positions.Neutral + trade_type = "neutral" + self._last_trade_tick = None + else: + print("case not defined") + + if trade_type is not None: + self.trade_history.append( + {'price': self.current_price(), 'index': self._current_tick, + 'type': trade_type}) + + if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8): + self._done = True + + self._position_history.append(self._position) + + info = dict( + tick=self._current_tick, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value + ) + + observation = self._get_observation() + + self._update_history(info) + + return observation, step_reward, self._done, info + + def _get_observation(self): + features_window = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state = 
DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', 'position', 'trade_duration'], + index=features_window.index) + + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state + + def get_trade_duration(self): + if self._last_trade_tick is None: + return 0 + else: + return self._current_tick - self._last_trade_tick + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. + + def is_tradesignal(self, action: int): + # trade signal + """ + Determine if the signal is a trade signal + e.g.: agent wants a Actions.Long_exit while it is in a Positions.short + """ + return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Short_enter.value and self._position == Positions.Long) or + (action == Actions.Exit.value and self._position == Positions.Neutral) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or + (action == Actions.Long_enter.value and self._position == Positions.Short)) + + def _is_valid(self, action: int): + # trade signal + """ + Determine if the signal is valid. 
+ e.g.: agent wants a Actions.Long_exit while it is in a Positions.short + """ + # Agent should only try to exit if it is in position + if action in (Actions.Exit.value): + if self._position not in (Positions.Short, Positions.Long): + return False + + # Agent should only try to enter if it is not in position + if action in (Actions.Short_enter.value, Actions.Long_enter.value): + if self._position != Positions.Neutral: + return False + + return True + + def _is_trade(self, action: Actions): + return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or + (action == Actions.Short_enter.value and self._position == Positions.Neutral)) + + def is_hold(self, action): + return ((action == Actions.Short_enter.value and self._position == Positions.Short) or + (action == Actions.Long_enter.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Long) or + (action == Actions.Neutral.value and self._position == Positions.Short) or + (action == Actions.Neutral.value and self._position == Positions.Neutral)) + + def add_entry_fee(self, price): + return price * (1 + self.fee) + + def add_exit_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + def get_sharpe_ratio(self): + return mean_over_std(self.get_portfolio_log_returns()) + + @abstractmethod + def calculate_reward(self, action): + """ + Reward is created by BaseReinforcementLearningModel and can + be inherited/edited by the user made ReinforcementLearner file. + """ + + return 0. + + def _update_profit(self, action): + if self._is_trade(action) or self._done: + pnl = self.get_unrealized_profit() + + if self._position in (Positions.Long, Positions.Short): + self._total_profit *= (1 + pnl) + self._profits.append((self._current_tick, self._total_profit)) + self.close_trade_profit.append(pnl) + + def most_recent_return(self, action: int): + """ + Calculate the tick to tick return if in a trade. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_entry_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_exit_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward From cdc550da9a40ed8e46150bb2c9780e81147fb3b8 Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Fri, 26 Aug 2022 09:59:17 +0200 Subject: [PATCH 062/232] Revert the docker changes to be inline with the original freqtrade image Reverted the changes, and added a new way of doing, Dockerfile.freqai with that file the users can make their own dockerimage. 
--- Dockerfile | 2 +- docker/Dockerfile.freqai | 59 +++++++++++++++++++++++++++++++++++---- requirements-freqai.txt | 2 +- requirements-hyperopt.txt | 1 - 4 files changed, 56 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index d06b53202..d37555cd8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,7 +33,7 @@ RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* ENV LD_LIBRARY_PATH /usr/local/lib # Install dependencies -COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt requirements-freqai.txt /freqtrade/ +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ USER ftuser RUN pip install --user --no-cache-dir numpy \ && pip install --user --no-cache-dir -r requirements-hyperopt.txt diff --git a/docker/Dockerfile.freqai b/docker/Dockerfile.freqai index 9a2f75700..af9da4c25 100644 --- a/docker/Dockerfile.freqai +++ b/docker/Dockerfile.freqai @@ -1,9 +1,58 @@ -ARG sourceimage=freqtradeorg/freqtrade -ARG sourcetag=develop -FROM ${sourceimage}:${sourcetag} +FROM python:3.10.6-slim-bullseye as base + +# Setup env +ENV LANG C.UTF-8 +ENV LC_ALL C.UTF-8 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONFAULTHANDLER 1 +ENV PATH=/home/ftuser/.local/bin:$PATH +ENV FT_APP_ENV="docker" + +# Prepare environment +RUN mkdir /freqtrade \ + && apt-get update \ + && apt-get -y install sudo libatlas3-base curl sqlite3 libhdf5-serial-dev \ + && apt-get clean \ + && useradd -u 1000 -G sudo -U -m -s /bin/bash ftuser \ + && chown ftuser:ftuser /freqtrade \ + # Allow sudoers + && echo "ftuser ALL=(ALL) NOPASSWD: /bin/chown" >> /etc/sudoers + +WORKDIR /freqtrade # Install dependencies -COPY requirements-freqai.txt /freqtrade/ +FROM base as python-deps +RUN apt-get update \ + && apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \ + && apt-get clean \ + && pip install --upgrade pip -RUN pip install -r requirements-freqai.txt --user --no-cache-dir +# Install TA-lib +COPY build_helpers/* /tmp/ +RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* +ENV LD_LIBRARY_PATH /usr/local/lib +# Install dependencies +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt requirements-freqai.txt /freqtrade/ +USER ftuser +RUN pip install --user --no-cache-dir numpy \ + && pip install --user --no-cache-dir -r requirements-freqai.txt + +# Copy dependencies to runtime-image +FROM base as runtime-image +COPY --from=python-deps /usr/local/lib /usr/local/lib +ENV LD_LIBRARY_PATH /usr/local/lib + +COPY --from=python-deps --chown=ftuser:ftuser /home/ftuser/.local /home/ftuser/.local + +USER ftuser +# Install and execute +COPY --chown=ftuser:ftuser . /freqtrade/ + +RUN pip install -e . --user --no-cache-dir --no-build-isolation \ + && mkdir /freqtrade/user_data/ \ + && freqtrade install-ui + +ENTRYPOINT ["freqtrade"] +# Default to trade mode +CMD [ "trade" ] diff --git a/requirements-freqai.txt b/requirements-freqai.txt index de1b6670a..aebce1fae 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -1,5 +1,5 @@ # Include all requirements to run the bot. 
--r requirements.txt +-r requirements-hyperopt.txt # Required for freqai scikit-learn==1.1.2 diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index e19eb27c1..020ccdda8 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -7,4 +7,3 @@ scikit-learn==1.1.2 scikit-optimize==0.9.0 filelock==3.8.0 progressbar2==4.0.0 --r requirements-freqai.txt \ No newline at end of file From baa4f8e3d0d7f4d1dcb950b799da749171365989 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 26 Aug 2022 11:03:17 +0200 Subject: [PATCH 063/232] remove Base3ActionEnv in favor of Base4Action --- freqtrade/freqai/RL/Base3ActionRLEnv.py | 332 ------------------------ 1 file changed, 332 deletions(-) delete mode 100644 freqtrade/freqai/RL/Base3ActionRLEnv.py diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py deleted file mode 100644 index fe51d3b13..000000000 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ /dev/null @@ -1,332 +0,0 @@ -# Example of a 3 action environment. - -# import logging -# from enum import Enum - -# import gym -# import numpy as np -# import pandas as pd -# from gym import spaces -# from gym.utils import seeding -# from pandas import DataFrame - - -# # from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union - -# logger = logging.getLogger(__name__) - - -# class Actions(Enum): -# Short = 0 -# Long = 1 -# Neutral = 2 - - -# class Positions(Enum): -# Short = 0 -# Long = 1 -# Neutral = 0.5 - -# def opposite(self): -# return Positions.Short if self == Positions.Long else Positions.Long - - -# def mean_over_std(x): -# std = np.std(x, ddof=1) -# mean = np.mean(x) -# return mean / std if std > 0 else 0 - - -# class Base3ActionRLEnv(gym.Env): - -# metadata = {'render.modes': ['human']} - -# def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), -# reward_kwargs: dict = {}, window_size=10, starting_point=True, -# id: str = 'baseenv-1', seed: int = 1): -# assert df.ndim == 2 - -# self.id = id -# self.seed(seed) -# self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - -# def reset_env(self, df, prices, window_size, reward_kwargs, starting_point=True): -# self.df = df -# self.signal_features = self.df -# self.prices = prices -# self.window_size = window_size -# self.starting_point = starting_point -# self.rr = reward_kwargs["rr"] -# self.profit_aim = reward_kwargs["profit_aim"] - -# self.fee = 0.0015 - -# # # spaces -# self.shape = (window_size, self.signal_features.shape[1] + 2) -# self.action_space = spaces.Discrete(len(Actions)) -# self.observation_space = spaces.Box( -# low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - -# # episode -# self._start_tick = self.window_size -# self._end_tick = len(self.prices) - 1 -# self._done = None -# self._current_tick = None -# self._last_trade_tick = None -# self._position = Positions.Neutral -# self._position_history = None -# self.total_reward = None -# self._total_profit = None -# self._first_rendering = None -# self.history = None -# self.trade_history = [] - -# def seed(self, seed: int = 1): -# self.np_random, seed = seeding.np_random(seed) -# return [seed] - -# def reset(self): - -# self._done = False - -# if self.starting_point is True: -# self._position_history = (self._start_tick * [None]) + [self._position] -# else: -# self._position_history = (self.window_size * [None]) + [self._position] - -# self._current_tick = self._start_tick -# self._last_trade_tick = None -# self._position = Positions.Neutral - -# 
self.total_reward = 0. -# self._total_profit = 1. # unit -# self._first_rendering = True -# self.history = {} -# self.trade_history = [] -# self.portfolio_log_returns = np.zeros(len(self.prices)) - -# self._profits = [(self._start_tick, 1)] -# self.close_trade_profit = [] - -# return self._get_observation() - -# def step(self, action: int): -# self._done = False -# self._current_tick += 1 - -# if self._current_tick == self._end_tick: -# self._done = True - -# self.update_portfolio_log_returns(action) - -# self._update_profit(action) -# step_reward = self.calculate_reward(action) -# self.total_reward += step_reward - -# trade_type = None -# if self.is_tradesignal(action): # exclude 3 case not trade -# # Update position -# """ -# Action: Neutral, position: Long -> Close Long -# Action: Neutral, position: Short -> Close Short - -# Action: Long, position: Neutral -> Open Long -# Action: Long, position: Short -> Close Short and Open Long - -# Action: Short, position: Neutral -> Open Short -# Action: Short, position: Long -> Close Long and Open Short -# """ - -# if action == Actions.Neutral.value: -# self._position = Positions.Neutral -# trade_type = "neutral" -# elif action == Actions.Long.value: -# self._position = Positions.Long -# trade_type = "long" -# elif action == Actions.Short.value: -# self._position = Positions.Short -# trade_type = "short" -# else: -# print("case not defined") - -# # Update last trade tick -# self._last_trade_tick = self._current_tick - -# if trade_type is not None: -# self.trade_history.append( -# {'price': self.current_price(), 'index': self._current_tick, -# 'type': trade_type}) - -# if self._total_profit < 0.2: -# self._done = True - -# self._position_history.append(self._position) -# observation = self._get_observation() -# info = dict( -# tick=self._current_tick, -# total_reward=self.total_reward, -# total_profit=self._total_profit, -# position=self._position.value -# ) -# self._update_history(info) - -# return observation, step_reward, self._done, info - -# def _get_observation(self): -# features_window = self.signal_features[( -# self._current_tick - self.window_size):self._current_tick] -# features_and_state = DataFrame(np.zeros((len(features_window), 2)), -# columns=['current_profit_pct', 'position'], -# index=features_window.index) - -# features_and_state['current_profit_pct'] = self.get_unrealized_profit() -# features_and_state['position'] = self._position.value -# features_and_state = pd.concat([features_window, features_and_state], axis=1) -# return features_and_state - -# def get_unrealized_profit(self): - -# if self._last_trade_tick is None: -# return 0. - -# if self._position == Positions.Neutral: -# return 0. -# elif self._position == Positions.Short: -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# return (last_trade_price - current_price) / last_trade_price -# elif self._position == Positions.Long: -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# return (current_price - last_trade_price) / last_trade_price -# else: -# return 0. 
- -# def is_tradesignal(self, action: int): -# # trade signal -# """ -# not trade signal is : -# Action: Neutral, position: Neutral -> Nothing -# Action: Long, position: Long -> Hold Long -# Action: Short, position: Short -> Hold Short -# """ -# return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) -# or (action == Actions.Short.value and self._position == Positions.Short) -# or (action == Actions.Long.value and self._position == Positions.Long)) - -# def _is_trade(self, action: Actions): -# return ((action == Actions.Long.value and self._position == Positions.Short) or -# (action == Actions.Short.value and self._position == Positions.Long) or -# (action == Actions.Neutral.value and self._position == Positions.Long) or -# (action == Actions.Neutral.value and self._position == Positions.Short) -# ) - -# def is_hold(self, action): -# return ((action == Actions.Short.value and self._position == Positions.Short) -# or (action == Actions.Long.value and self._position == Positions.Long)) - -# def add_buy_fee(self, price): -# return price * (1 + self.fee) - -# def add_sell_fee(self, price): -# return price / (1 + self.fee) - -# def _update_history(self, info): -# if not self.history: -# self.history = {key: [] for key in info.keys()} - -# for key, value in info.items(): -# self.history[key].append(value) - -# def get_sharpe_ratio(self): -# return mean_over_std(self.get_portfolio_log_returns()) - -# def calculate_reward(self, action): - -# if self._last_trade_tick is None: -# return 0. - -# # close long -# if (action == Actions.Short.value or -# action == Actions.Neutral.value) and self._position == Positions.Long: -# last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(current_price) - np.log(last_trade_price)) - -# # close short -# if (action == Actions.Long.value or -# action == Actions.Neutral.value) and self._position == Positions.Short: -# last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) -# current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) -# return float(np.log(last_trade_price) - np.log(current_price)) - -# return 0. - -# def _update_profit(self, action): -# if self._is_trade(action) or self._done: -# pnl = self.get_unrealized_profit() - -# if self._position == Positions.Long: -# self._total_profit = self._total_profit + self._total_profit * pnl -# self._profits.append((self._current_tick, self._total_profit)) -# self.close_trade_profit.append(pnl) - -# if self._position == Positions.Short: -# self._total_profit = self._total_profit + self._total_profit * pnl -# self._profits.append((self._current_tick, self._total_profit)) -# self.close_trade_profit.append(pnl) - -# def most_recent_return(self, action: int): -# """ -# We support Long, Neutral and Short positions. -# Return is generated from rising prices in Long -# and falling prices in Short positions. -# The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
-# """ -# # Long positions -# if self._position == Positions.Long: -# current_price = self.prices.iloc[self._current_tick].open -# if action == Actions.Short.value or action == Actions.Neutral.value: -# current_price = self.add_sell_fee(current_price) - -# previous_price = self.prices.iloc[self._current_tick - 1].open - -# if (self._position_history[self._current_tick - 1] == Positions.Short -# or self._position_history[self._current_tick - 1] == Positions.Neutral): -# previous_price = self.add_buy_fee(previous_price) - -# return np.log(current_price) - np.log(previous_price) - -# # Short positions -# if self._position == Positions.Short: -# current_price = self.prices.iloc[self._current_tick].open -# if action == Actions.Long.value or action == Actions.Neutral.value: -# current_price = self.add_buy_fee(current_price) - -# previous_price = self.prices.iloc[self._current_tick - 1].open -# if (self._position_history[self._current_tick - 1] == Positions.Long -# or self._position_history[self._current_tick - 1] == Positions.Neutral): -# previous_price = self.add_sell_fee(previous_price) - -# return np.log(previous_price) - np.log(current_price) - -# return 0 - -# def get_portfolio_log_returns(self): -# return self.portfolio_log_returns[1:self._current_tick + 1] - -# def update_portfolio_log_returns(self, action): -# self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - -# def current_price(self) -> float: -# return self.prices.iloc[self._current_tick].open - -# def prev_price(self) -> float: -# return self.prices.iloc[self._current_tick - 1].open - -# def sharpe_ratio(self) -> float: -# if len(self.close_trade_profit) == 0: -# return 0. -# returns = np.array(self.close_trade_profit) -# reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) -# return reward From 8c313b431d9c1094e588acf179152a944ca84de0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 26 Aug 2022 11:14:01 +0200 Subject: [PATCH 064/232] remove whitespace from Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d37555cd8..14a67edc8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,7 +33,7 @@ RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib* ENV LD_LIBRARY_PATH /usr/local/lib # Install dependencies -COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ +COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/ USER ftuser RUN pip install --user --no-cache-dir numpy \ && pip install --user --no-cache-dir -r requirements-hyperopt.txt From 7766350c1558ae257cc540a22dadc7aefcafe384 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 28 Aug 2022 19:21:57 +0200 Subject: [PATCH 065/232] refactor environment inheritence tree to accommodate flexible action types/counts. 
fix bug in train profit handling --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 234 +-------------- freqtrade/freqai/RL/Base5ActionRLEnv.py | 187 +----------- freqtrade/freqai/RL/BaseEnvironment.py | 270 ++++++++++++++++++ .../RL/BaseReinforcementLearningModel.py | 35 +-- .../RL/ReinforcementLearnerCustomAgent.py | 23 +- freqtrade/freqai/freqai_interface.py | 2 +- .../prediction_models/ReinforcementLearner.py | 17 +- .../ReinforcementLearner_multiproc.py | 11 +- 8 files changed, 339 insertions(+), 440 deletions(-) create mode 100644 freqtrade/freqai/RL/BaseEnvironment.py diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 478507639..ef5b1c107 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -1,14 +1,11 @@ import logging from enum import Enum -from typing import Optional -import gym -import numpy as np from gym import spaces -from gym.utils import seeding -from pandas import DataFrame -import pandas as pd -from abc import abstractmethod + +from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions + + logger = logging.getLogger(__name__) @@ -19,95 +16,13 @@ class Actions(Enum): Short_enter = 3 - -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - - -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - - -class Base4ActionRLEnv(gym.Env): +class Base4ActionRLEnv(BaseEnvironment): """ - Base class for a 5 action environment + Base class for a 4 action environment """ - metadata = {'render.modes': ['human']} - def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), - reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - - self.rl_config = config['freqai']['rl_config'] - self.id = id - self.seed(seed) - self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - - def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, - reward_kwargs: dict, starting_point=True): - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee = 0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 3) + def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) - self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick: int = self.window_size - self._end_tick: int = len(self.prices) - 1 - self._done: bool = False - self._current_tick: int = self._start_tick - self._last_trade_tick: Optional[int] = None - self._position = Positions.Neutral - self._position_history: list = [None] - self.total_reward: float = 0 - self._total_profit: float = 1 - self.history: dict = {} - self.trade_history: list = [] - - def seed(self, seed: int = 1): - self.np_random, seed = seeding.np_random(seed) - return [seed] - - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = 
Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. # unit - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - - return self._get_observation() def step(self, action: int): self._done = False @@ -181,43 +96,6 @@ class Base4ActionRLEnv(gym.Env): return observation, step_reward, self._done, info - def _get_observation(self): - features_window = self.signal_features[( - self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 3)), - columns=['current_profit_pct', 'position', 'trade_duration'], - index=features_window.index) - - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state['trade_duration'] = self.get_trade_duration() - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state - - def get_trade_duration(self): - if self._last_trade_tick is None: - return 0 - else: - return self._current_tick - self._last_trade_tick - - def get_unrealized_profit(self): - - if self._last_trade_tick is None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. 
- def is_tradesignal(self, action: int): # trade signal """ @@ -228,7 +106,7 @@ class Base4ActionRLEnv(gym.Env): (action == Actions.Neutral.value and self._position == Positions.Short) or (action == Actions.Neutral.value and self._position == Positions.Long) or (action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Short_enter.value and self._position == Positions.Long) or + (action == Actions.Short_enter.value and self._position == Positions.Long) or (action == Actions.Exit.value and self._position == Positions.Neutral) or (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short)) @@ -240,7 +118,7 @@ class Base4ActionRLEnv(gym.Env): e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ # Agent should only try to exit if it is in position - if action in (Actions.Exit.value): + if action == Actions.Exit.value: if self._position not in (Positions.Short, Positions.Long): return False @@ -250,97 +128,3 @@ class Base4ActionRLEnv(gym.Env): return False return True - - def _is_trade(self, action: Actions): - return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or - (action == Actions.Short_enter.value and self._position == Positions.Neutral)) - - def is_hold(self, action): - return ((action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Long_enter.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - (action == Actions.Neutral.value and self._position == Positions.Neutral)) - - def add_entry_fee(self, price): - return price * (1 + self.fee) - - def add_exit_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - @abstractmethod - def calculate_reward(self, action): - """ - Reward is created by BaseReinforcementLearningModel and can - be inherited/edited by the user made ReinforcementLearner file. - """ - - return 0. - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position in (Positions.Long, Positions.Short): - self._total_profit *= (1 + pnl) - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action: int): - """ - Calculate the tick to tick return if in a trade. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
- """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_entry_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_exit_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index b93d6e6ff..e0a38f9d1 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -1,14 +1,14 @@ import logging from enum import Enum -from typing import Optional -import gym import numpy as np -from gym import spaces -from gym.utils import seeding -from pandas import DataFrame import pandas as pd -from abc import abstractmethod +from gym import spaces +from pandas import DataFrame + +from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions + + logger = logging.getLogger(__name__) @@ -20,70 +20,19 @@ class Actions(Enum): Short_exit = 4 -class Positions(Enum): - Short = 0 - Long = 1 - Neutral = 0.5 - - def opposite(self): - return Positions.Short if self == Positions.Long else Positions.Long - - def mean_over_std(x): std = np.std(x, ddof=1) mean = np.mean(x) return mean / std if std > 0 else 0 -class Base5ActionRLEnv(gym.Env): +class Base5ActionRLEnv(BaseEnvironment): """ Base class for a 5 action environment """ - metadata = {'render.modes': ['human']} - def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), - reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - - self.rl_config = config['freqai']['rl_config'] - self.id = id - self.seed(seed) - self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - - def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, - reward_kwargs: dict, starting_point=True): - self.df = df - self.signal_features = self.df - self.prices = prices - self.window_size = window_size - self.starting_point = starting_point - self.rr = reward_kwargs["rr"] - self.profit_aim = reward_kwargs["profit_aim"] - - self.fee = 0.0015 - - # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 3) + def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) - 
self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) - - # episode - self._start_tick: int = self.window_size - self._end_tick: int = len(self.prices) - 1 - self._done: bool = False - self._current_tick: int = self._start_tick - self._last_trade_tick: Optional[int] = None - self._position = Positions.Neutral - self._position_history: list = [None] - self.total_reward: float = 0 - self._total_profit: float = 1 - self.history: dict = {} - self.trade_history: list = [] - - def seed(self, seed: int = 1): - self.np_random, seed = seeding.np_random(seed) - return [seed] def reset(self): @@ -106,6 +55,7 @@ class Base5ActionRLEnv(gym.Env): self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] + self._total_unrealized_profit = 1 return self._get_observation() @@ -118,7 +68,7 @@ class Base5ActionRLEnv(gym.Env): self.update_portfolio_log_returns(action) - self._update_profit(action) + self._update_unrealized_total_profit() step_reward = self.calculate_reward(action) self.total_reward += step_reward @@ -148,10 +98,12 @@ class Base5ActionRLEnv(gym.Env): trade_type = "short" self._last_trade_tick = self._current_tick elif action == Actions.Long_exit.value: + self._update_total_profit() self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None elif action == Actions.Short_exit.value: + self._update_total_profit() self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None @@ -163,7 +115,8 @@ class Base5ActionRLEnv(gym.Env): {'price': self.current_price(), 'index': self._current_tick, 'type': trade_type}) - if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8): + if (self._total_profit < self.max_drawdown or + self._total_unrealized_profit < self.max_drawdown): self._done = True self._position_history.append(self._position) @@ -200,24 +153,6 @@ class Base5ActionRLEnv(gym.Env): else: return self._current_tick - self._last_trade_tick - def get_unrealized_profit(self): - - if self._last_trade_tick is None: - return 0. - - if self._position == Positions.Neutral: - return 0. - elif self._position == Positions.Short: - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - return (current_price - last_trade_price) / last_trade_price - else: - return 0. 
- def is_tradesignal(self, action: int): # trade signal """ @@ -253,97 +188,3 @@ class Base5ActionRLEnv(gym.Env): return False return True - - def _is_trade(self, action: Actions): - return ((action == Actions.Long_enter.value and self._position == Positions.Neutral) or - (action == Actions.Short_enter.value and self._position == Positions.Neutral)) - - def is_hold(self, action): - return ((action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Long_enter.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - (action == Actions.Neutral.value and self._position == Positions.Neutral)) - - def add_entry_fee(self, price): - return price * (1 + self.fee) - - def add_exit_fee(self, price): - return price / (1 + self.fee) - - def _update_history(self, info): - if not self.history: - self.history = {key: [] for key in info.keys()} - - for key, value in info.items(): - self.history[key].append(value) - - def get_sharpe_ratio(self): - return mean_over_std(self.get_portfolio_log_returns()) - - @abstractmethod - def calculate_reward(self, action): - """ - Reward is created by BaseReinforcementLearningModel and can - be inherited/edited by the user made ReinforcementLearner file. - """ - - return 0. - - def _update_profit(self, action): - if self._is_trade(action) or self._done: - pnl = self.get_unrealized_profit() - - if self._position in (Positions.Long, Positions.Short): - self._total_profit *= (1 + pnl) - self._profits.append((self._current_tick, self._total_profit)) - self.close_trade_profit.append(pnl) - - def most_recent_return(self, action: int): - """ - Calculate the tick to tick return if in a trade. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_entry_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_exit_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - - def current_price(self) -> float: - return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. 
+ 1e-9) / (np.std(returns) + 1e-9) - return reward diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py new file mode 100644 index 000000000..bba3c4a1b --- /dev/null +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -0,0 +1,270 @@ +import logging +from abc import abstractmethod +from enum import Enum +from typing import Optional + +import gym +import numpy as np +import pandas as pd +from gym import spaces +from gym.utils import seeding +from pandas import DataFrame + + +logger = logging.getLogger(__name__) + + +class Positions(Enum): + Short = 0 + Long = 1 + Neutral = 0.5 + + def opposite(self): + return Positions.Short if self == Positions.Long else Positions.Long + + +class BaseEnvironment(gym.Env): + """ + Base class for environments. This class is agnostic to action count. + Inherited classes customize this to include varying action counts/types, + See RL/Base5ActionRLEnv.py and RL/Base4ActionRLEnv.py + """ + + def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, window_size=10, starting_point=True, + id: str = 'baseenv-1', seed: int = 1, config: dict = {}): + + self.rl_config = config['freqai']['rl_config'] + self.id = id + self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) + self.max_drawdown = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8) + self.compound_trades = config['stake_amount'] == 'unlimited' + + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, + reward_kwargs: dict, starting_point=True): + self.df = df + self.signal_features = self.df + self.prices = prices + self.window_size = window_size + self.starting_point = starting_point + self.rr = reward_kwargs["rr"] + self.profit_aim = reward_kwargs["profit_aim"] + + self.fee = 0.0015 + + # # spaces + self.shape = (window_size, self.signal_features.shape[1] + 3) + self.set_action_space() + self.observation_space = spaces.Box( + low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + + # episode + self._start_tick: int = self.window_size + self._end_tick: int = len(self.prices) - 1 + self._done: bool = False + self._current_tick: int = self._start_tick + self._last_trade_tick: Optional[int] = None + self._position = Positions.Neutral + self._position_history: list = [None] + self.total_reward: float = 0 + self._total_profit: float = 1 + self._total_unrealized_profit: float = 1 + self.history: dict = {} + self.trade_history: list = [] + + @abstractmethod + def set_action_space(self): + """ + Unique to the environment action count. Must be inherited. + """ + + def seed(self, seed: int = 1): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + def reset(self): + + self._done = False + + if self.starting_point is True: + self._position_history = (self._start_tick * [None]) + [self._position] + else: + self._position_history = (self.window_size * [None]) + [self._position] + + self._current_tick = self._start_tick + self._last_trade_tick = None + self._position = Positions.Neutral + + self.total_reward = 0. + self._total_profit = 1. # unit + self.history = {} + self.trade_history = [] + self.portfolio_log_returns = np.zeros(len(self.prices)) + + self._profits = [(self._start_tick, 1)] + self.close_trade_profit = [] + self._total_unrealized_profit = 1 + + return self._get_observation() + + @abstractmethod + def step(self, action: int): + """ + Step depeneds on action types, this must be inherited. 
+ """ + return + + def _get_observation(self): + """ + This may or may not be independent of action types, user can inherit + this in their custom "MyRLEnv" + """ + features_window = self.signal_features[( + self._current_tick - self.window_size):self._current_tick] + features_and_state = DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', 'position', 'trade_duration'], + index=features_window.index) + + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state + + def get_trade_duration(self): + if self._last_trade_tick is None: + return 0 + else: + return self._current_tick - self._last_trade_tick + + def get_unrealized_profit(self): + + if self._last_trade_tick is None: + return 0. + + if self._position == Positions.Neutral: + return 0. + elif self._position == Positions.Short: + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + return (current_price - last_trade_price) / last_trade_price + else: + return 0. + + @abstractmethod + def is_tradesignal(self, action: int): + # trade signal + """ + Determine if the signal is a trade signal. This is + unique to the actions in the environment, and therefore must be + inherited. + """ + return + + def _is_valid(self, action: int): + # trade signal + """ + Determine if the signal is valid.This is + unique to the actions in the environment, and therefore must be + inherited. + """ + return + + def add_entry_fee(self, price): + return price * (1 + self.fee) + + def add_exit_fee(self, price): + return price / (1 + self.fee) + + def _update_history(self, info): + if not self.history: + self.history = {key: [] for key in info.keys()} + + for key, value in info.items(): + self.history[key].append(value) + + @abstractmethod + def calculate_reward(self, action): + """ + Reward is created by BaseReinforcementLearningModel and can + be inherited/edited by the user made ReinforcementLearner file. + """ + + return 0. + + def _update_unrealized_total_profit(self): + """ + Update the unrealized total profit incase of episode end. + """ + if self._position in (Positions.Long, Positions.Short): + pnl = self.get_unrealized_profit() + if self.compound_trades: + # assumes unit stake and compounding + unrl_profit = self._total_profit * (1 + pnl) + else: + # assumes unit stake and no compounding + unrl_profit = self._total_profit + pnl + self._total_unrealized_profit = unrl_profit + + def _update_total_profit(self): + pnl = self.get_unrealized_profit() + if self.compound_trades: + # assumes unite stake and compounding + self._total_profit = self._total_profit * (1 + pnl) + else: + # assumes unit stake and no compounding + self._total_profit += pnl + + def most_recent_return(self, action: int): + """ + Calculate the tick to tick return if in a trade. + Return is generated from rising prices in Long + and falling prices in Short positions. + The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. 
+ """ + # Long positions + if self._position == Positions.Long: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + + if (self._position_history[self._current_tick - 1] == Positions.Short + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_entry_fee(previous_price) + + return np.log(current_price) - np.log(previous_price) + + # Short positions + if self._position == Positions.Short: + current_price = self.prices.iloc[self._current_tick].open + previous_price = self.prices.iloc[self._current_tick - 1].open + if (self._position_history[self._current_tick - 1] == Positions.Long + or self._position_history[self._current_tick - 1] == Positions.Neutral): + previous_price = self.add_exit_fee(previous_price) + + return np.log(previous_price) - np.log(current_price) + + return 0 + + def get_portfolio_log_returns(self): + return self.portfolio_log_returns[1:self._current_tick + 1] + + def update_portfolio_log_returns(self, action): + self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) + + def current_price(self) -> float: + return self.prices.iloc[self._current_tick].open + + def prev_price(self) -> float: + return self.prices.iloc[self._current_tick - 1].open + + def sharpe_ratio(self): + if len(self.close_trade_profit) == 0: + return 0. + returns = np.array(self.close_trade_profit) + reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) + return reward diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 5a7ae4372..77db9c655 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -1,25 +1,28 @@ import logging -from typing import Any, Dict, Tuple +from abc import abstractmethod +from datetime import datetime, timezone +from pathlib import Path +from typing import Any, Callable, Dict, Tuple +import gym import numpy as np import numpy.typing as npt import pandas as pd +import torch as th +import torch.multiprocessing from pandas import DataFrame -from abc import abstractmethod +from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.monitor import Monitor +from stable_baselines3.common.utils import set_random_seed + from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel -from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv, Actions, Positions +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv +from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions from freqtrade.persistence import Trade -import torch.multiprocessing -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.monitor import Monitor -import torch as th -from typing import Callable -from datetime import datetime, timezone -from stable_baselines3.common.utils import set_random_seed -import gym -from pathlib import Path + + logger = logging.getLogger(__name__) torch.multiprocessing.set_sharing_strategy('file_system') @@ -37,8 +40,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): super().__init__(config=kwargs['config']) th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] - 
self.train_env: Base5ActionRLEnv = None - self.eval_env: Base5ActionRLEnv = None + self.train_env: BaseEnvironment = None + self.eval_env: BaseEnvironment = None self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] @@ -194,7 +197,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def _predict(window): market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] - observations['current_profit'] = current_profit + observations['current_profit_pct'] = current_profit observations['position'] = market_side observations['trade_duration'] = trade_duration res, _ = model.predict(observations, deterministic=True) @@ -306,7 +309,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -def make_env(MyRLEnv: Base5ActionRLEnv, env_id: str, rank: int, +def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, config: Dict[str, Any] = {}) -> Callable: diff --git a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py index fcd813ce6..4ad95c214 100644 --- a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py +++ b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py @@ -1,19 +1,20 @@ import logging -import torch as th +from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Type, Union -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + +import gym +import torch as th from stable_baselines3 import DQN from stable_baselines3.common.buffers import ReplayBuffer -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from pathlib import Path -from stable_baselines3.dqn.policies import (CnnPolicy, DQNPolicy, MlpPolicy, - QNetwork) -from torch import nn -import gym -from stable_baselines3.common.torch_layers import (BaseFeaturesExtractor, - FlattenExtractor) -from stable_baselines3.common.type_aliases import GymEnv, Schedule from stable_baselines3.common.policies import BasePolicy +from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor +from stable_baselines3.common.type_aliases import GymEnv, Schedule +from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork +from torch import nn + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + logger = logging.getLogger(__name__) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 21b79e003..b3367f9de 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -7,7 +7,7 @@ import time from abc import ABC, abstractmethod from pathlib import Path from threading import Lock -from typing import Any, Dict, Tuple, Optional +from typing import Any, Dict, Optional, Tuple import numpy as np import pandas as pd diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index a72a56e20..0e156d28e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -1,15 +1,14 @@ import logging +from pathlib import Path from typing import Any, Dict -import torch as th -from 
freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -from pathlib import Path -# from pandas import DataFrame -# from stable_baselines3.common.callbacks import EvalCallback -# from stable_baselines3.common.monitor import Monitor import numpy as np +import torch as th + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + logger = logging.getLogger(__name__) @@ -53,7 +52,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return model - class MyRLEnv(BaseReinforcementLearningModel.MyRLEnv): + class MyRLEnv(Base5ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. Here the user sets a custom reward based on profit and trade duration. diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index f301da981..9f6a66729 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -1,15 +1,16 @@ import logging +from pathlib import Path from typing import Any, Dict # , Tuple # import numpy.typing as npt import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, make_env) -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from pathlib import Path logger = logging.getLogger(__name__) @@ -26,7 +27,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256]) + net_arch=[256, 256, 128]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, @@ -64,9 +65,9 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2) + num_cpu = int(self.freqai_info["rl_config"]["thread_count"]) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, + self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i in range(num_cpu)]) From af8f308584a270c4e35d2ad6d768099459975cb1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 28 Aug 2022 20:52:03 +0200 Subject: [PATCH 066/232] start the reinforcement learning doc --- docs/assets/tensorboard.png | Bin 0 -> 9273 bytes docs/freqai.md | 101 +++++++++++++++++++++++++++++++++++- 2 files changed, 99 insertions(+), 2 deletions(-) create mode 100644 docs/assets/tensorboard.png diff --git a/docs/assets/tensorboard.png b/docs/assets/tensorboard.png new file mode 100644 index 0000000000000000000000000000000000000000..b986900435b28c89e9d9e8d1bdb5413d4411f913 GIT binary patch literal 9273 zcmWk!1yodB5GGbiN?hsg24U&$Zcyp&j-^3V8tD?GQ$QB!knUcPSQ;c;Iz+nu_n-6P zoO|Bey?5r$%=dlsVzf1t@Nmd+P*70tRFvg)fqMXO+F)S-*P^<7T;PW7p={`lf`Z@o 
[remaining binary image data omitted]
**Datatype:** Positive float < 1.
| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`.
| | **Model training parameters** -| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model.
**Datatype:** Dictionary.**Datatype:** Boolean. +| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, such as `PPO` from stable_baselines3, this dictionary can contain any parameter from that model.
**Datatype:** Dictionary | `n_estimators` | The number of boosted trees to fit in regression.
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float.
+| | **Reinforcement Learning Parameters**
+| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
+| `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points.
**Datatype:** Integer. +| `thread_count` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int.
+| `max_trade_duration_candles` | Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`.
**Datatype:** int.
+| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. +| `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string.
+| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool.
+| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** Dictionary.
| | **Extraneous parameters**
| `keras` | If your model makes use of keras (typical of Tensorflow based prediction models), activate this flag so that the model save/loading follows keras standards. Default value `false`
**Datatype:** boolean. | `conv_width` | The width of a convolutional neural network input tensor or the `ReinforcementLearningModel` `window_size`. This replaces the need for `shift` by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. Default value, 2
**Datatype:** integer.
@@ -731,6 +741,93 @@ Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters
 FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](#https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)) with `min_samples` ($N$) taken as double the no. of user-defined features, and `eps` ($\varepsilon$) taken as the longest distance in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
 
+## Reinforcement Learning
+
+Setting up and running a Reinforcement Learning model is as quick and simple as running a Regressor. Users can start training and trading live from example files using:
+
+```bash
+freqtrade trade --freqaimodel ReinforcementLearner --strategy ReinforcementLearningExample5ac --strategy-path freqtrade/freqai/example_strats --config config_examples/config_freqai-rl.example.json
+```
+
+As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` that users express their creative theories about the market. For example, the user may want to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps the user wishes to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
+
+```python
+    class MyRLEnv(Base5ActionRLEnv):
+        """
+        User made custom environment. This class inherits from BaseEnvironment and gym.env.
+        Users can override any functions from those parent classes. Here is an example
+        of a user customized `calculate_reward()` function.
+ """ + + def calculate_reward(self, action): + + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + return 0. + +``` + +After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple (as shown in `example_strats/ReinforcementLearningExample5ac.py`). The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy. + + +### Using Tensorboard + +Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: + +```bash +pip3 install tensorboard +``` + +Next, the user can activate Tensorboard with the following command: + +```bash +cd freqtrade +tensorboard --logdir user_data/models/unique-id +``` + +where `unique-id` is the `identifier` set in the `freqai` configuration file. + +![tensorboard](assets/tensorboard.png) + ## Additional information ### Common pitfalls @@ -738,7 +835,7 @@ FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's we FreqAI cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically). This is for performance reasons - FreqAI relies on making quick predictions/retrains. To do this effectively, it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends -new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume). +new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. 
However, FreqAI does work with the `ShuffleFilter` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).

## Credits

From 67cddae756833dc4716fe0d08defc5b487384bb8 Mon Sep 17 00:00:00 2001
From: robcaulk
Date: Sun, 28 Aug 2022 21:00:26 +0200
Subject: [PATCH 067/232] fix tensorboard image

---
 docs/assets/tensorboard.jpg | Bin 0 -> 370209 bytes
 docs/assets/tensorboard.png | Bin 9273 -> 0 bytes
 docs/freqai.md              |   2 +-
 3 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 docs/assets/tensorboard.jpg
 delete mode 100644 docs/assets/tensorboard.png

diff --git a/docs/assets/tensorboard.jpg b/docs/assets/tensorboard.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2aefa869dc475aa007fb4f1fd4dd52cd0b9cbaab
GIT binary patch
literal 370209
[binary image data omitted]
zrBYy(+b^ETG+4!otFHGp)^Q4T7cIYx8F1{pkExQZsL+K41R}YyC5js;lRriZCh*Z& zm2pr-?!GA2IId5Yv?qM_UBRrn#Qh$_Kv&l3LC;`RzZjfV3z+ds&K%w>wTxr{FiqXp zk%+cBuC>_a0hp+|z_I&L|2T5Z-u58ktplx~$LT@-8hdT-QE$~T0dVUD3t>QSt+E7( z?s&?8RD;#cSN1P|LW6VALg2Wo)3Fhm&j67?{_tPbk)Dzs)@>ti$#_%uXD_Y41BRH; zdSGbp*@VP>%f&831kY8#`t#53m`Vhb#N8gMHSq56(EECa--->*z=i%g|{xxBglE^O|zvv+)R9#7I# z`K=E+lITh6wpFU0ht?v8PmjxL)>lCvXkI*bb9Mn%K=tuN>8C1+;8vHqE6}-|TW03AkGq+ZPOI^|Q<^p$;$;~x2;)$|X=>dex^l&90fzk4Wr z0FSEMbKM-tW1hR)fYpymAic@1fjC#8N+A2|`aU0m4=oxjIkmUh`%(Z;O4HU4HZn)U z+ZdC#5V~`!?w|JbTQHT38>^Idw`g!kwo>wM(b6Lz7X@9;l^|3vgg`W)%)$E_%bD%7 zqQTc%S9n+^_cexP4;vwP*^DkNYOpG?)d_wY-$8zTw0LWWsV4GXm(eqZTLZsF=Frj# zfmTH67*AS-z-0GvsT1!1O8&F&7&C!MdMX4GO8{Y=Z~~qB2D0pL^qz2z|H_N6AW&~nPj8e53;)_+5sD}Q zgyT*-n0A5Aa-)9QG)8>T&UP&A2N>9QM>ntPcQ0AWdD=`2zw89e?xihrn3=B(p52Dh z7tsJ4R-Z$j5?q~9-PdBqxuX~adN}UPW$Lu|W@%GcZTXjg20>j}%rPEO_SL*^=#r{z6H-SU_9_9j!m3-r!gsAP9+oFs#m!$8K zbf2_;`fqvw%jutIb$_YWx%y^7Tz+k>kvTePe=bfRxtJ!NLMokwDtVly+=Tewl;19iqzf#fhNc` zaXC+`h%B85fy8t4dHuN*Cj3VTf0yH5k2`*-$wNcs_i?Dl0C4`!{pnEqyKMhb{GkOW zY$J2F8vb#dMkEnU!0+bw_cfrlNfMNe!~2w2m2dz5`p*X8E_{Hwt5(ms=J4boal1UU z*$cGpBDF4iCz8aF*6VN#Y287j2WLtwo*tbqhp}{*+q`tXXE+T12mVeZqB7?bJ6gVe z8SgpRrgT0Fa^B$5)`t|VvQS8A$XfYqa#7%-Bu(>ZOIOr;+j+DN&y6+uP9O~tdgQ42 z8FJpr2VZTMOEV|*N=&K-Kk;hVgk&RI9M5YNaaw4+*f4XN+~qN&s8o0Nd=}PHX`w`Eeo4TJ`S=6m#SH}zmYIXF4cEEX%56wqe|Bd%`t1P%9cmGud z1NNmI=!O~y^s`^VHPWkkurSa`)2j*=_5TwOAigCIV|>RhhQ~7KF!9lm^@}Z?u$PUz zcAD)n`^NRTjs3^}M%Rvt0Y8Ld!A z(0pH89x`Nqfhk8&_LYl3j$DGdrCkbLl9pxhO2<`Oi0XyKhLJy=@* zaHrf%vi2M3t)($zSwW1QOf1i|Efp2}i_xa-3SZHot$4MXY2D=F$ymA77@NFFq=xeq zwwhupolJ$2*RDpw?|_wsLjI3j?Ziff&VK`S-1z!sxc6X#^6MAS31Z&G4nYx@`Ma5O z72BndGx+Ydb#}ID56921KMw;|$DiGw*_%J7|98TNgP*&%vPL2p?A3$26`Q?7+gpl zCVFg#ompTgl5ZpoWV*`nyzMb(wG-!WVdZcUF0fO~tnif+=GYgmHqBT>rBSugrS{+| zc75Jznr>U$*jN|77?kTt0E=#eiQEtJBuM&gO-GT3Jhsg~%}zsCQ=_}|Dn}?Sz?dG$ z@9P0y&~YYo&mSLsh_DoOQ`T}kWyc>Hw+ZQYKX+$uBrx}OPvQI%{9R#BNM?R=;5|HP= zkzpB$$JSGdyvpF+oke`>@#Z78Wq9SOqoFwd_HZsQX@2Os!_`9G$f0gbq>j{SD2bh| zfo;~Wj}1m7#g!TKI}{bH=dFs1*uuNCiV-lEPi^MRdLiQ%-r-_5mp<@QLLLgA`UJe?z?)Y|X6df40vjl^TNg%cm zWvGzQz$Vx7Na{>RRfYv=g`TxUiD#`5jZ0VW+V>MSK62SXGw({}TcFgS`Z7ho1spl@p61jFh*W$qr8Q!R4cx(5T#|uoqon)T=4VEAL_+Rd(s#LQN z4(UpJN-Xv>`!UZEBgikR*7GUExkxFN+ajY963lDccW=PeBSW}-L+M??mbOX77N>Qu z$8sjou%Q$qFus+PdG@R6+Kv*>?v7VTjo39;d1ZE?abyf&<0|KJ=iCJ@9c$&ti~Y>} z98+7Dkpznif@;Z^w6FxqX;O7isJ0W~R}va{TDACHF?SpJwp2s~YP5M_PdeohrEy?h z+uQ|?J(|SmrAmM4DARTDf%!@>G3{NSGLtUe2%x8b?+&@*Il8X#IBn{un~93M9}Sv- z4@`K9M&!5?Bk`uUamJBnM4M}LGpb4w*@Q9?-0xdVd$mx>?TzP9VHjOz8ruLsT3nZ% zd{qftAh09stWs@clT#XsYqDY2sxs$MNnRQz_U(_YI0WKoW5vpm7!?CzXobC1hfc&y z0~fRJ_*fri`OTk*ok$EbU}Ob|BH-e4o&C`C0B{%x-?{MJZ!-TO>gT5WAVdV+^mc!e zbIJ>ZPuDu0<;mUY8i1tj3Qeq0)(2=mOYKlnpRjx0bcPrJV!*w*0 z#~xwgmdG?C2V{V~e4D7uhrQYq*VmRBEM|1F&4J5FceS3(jh<}HSlY-`u7{fAF(IjU zIj+udztjuWX@qFYv+kUK5Z_H?hhJ70H`T^2ZGu{|AsTN%AA)~OD}}YrPcLhTG0C0} zeyz@exo-P`_3)l9Es5FT_1Z6iN|@`l5+Broo3WF(q3ZO9*LM{$*LV5CvSSX-xqXzn zfk5ctCUTBA~J+znjM=+gE?6VjN@P6HJj$cSM^>*(?>K8F*g5&XAln7g$T z^r|=i3ITr0wKNO_%5s?7zF%7c2JZlw0q}y=!{Lr(Mk~=f^f!v`~20h z%>GH3)(+kGb;sUS7Z(q#yHMrbcgBstXIw;I)#=$}TWNMY-Xyb1p02U$*zeJ;=vS}7 z^Pbj}nvib+!v%6fZBcmA1jLGWbi+U1b(oqlO7dU4n^S~lnw=wilpe%gb^?08VEx*A zn2r&NunZXw#S7}XJL=-ZO9jcFMxM(&#sF|^&;bnhk!%othpjfa`E1sn;@uCM@L$Rc zTo#Hi=Rr7bU(dsnjwwbknzvjrl4)rXnc(R4te4bZzl=T#Px~4b@4jv}8u$zqv|-t! 
zr54C8^uyTbtNX*@b7T}}xcHwXOvaT1U#;iCP=7sLTrZAOEb=iVi_WAsHoE@pTE))m_NpwKJ)>+B;W?Z(k#WRQ@GVAxlC zof_@=n-(}e=P*|YbAOm@V9SpPQ9{U68jW|)4>G!RZr77Z28+7@-?_rCQxl28eDo^N zK;g0RV~PX4ua6TIg@8|BA@0Fg2XGy*5-OFQEiqrjI?v2-P7R2l{E+evNS|5@+PPJz z7ym5B!tPE{aB1XPs&#FGj4;nws}l++3@a{sOtCmylt-%-ZWx%@<3Q}L#=zK9I8Vpa))lJC%*vy*`}QRG0rJ~&xwo3w$YWC$|LiD zx-~>DzhIS5o|oiJ*wT4BZ5|Kl+j8;Quv3fXH|H#O6yqf?cuS*@w`lxOVNqJ9A=?gB zwm%d5-eGo^Kfjpn$#$UJtns`M$&NxFz0DguMfe8+nX=jG3(}JBlfk|^t)DgxD&cc6 z;=?nxnO$x%ZJeYl#p?2#Z(l}X8Fkt-nblF2fs*zfz4iU|Gtvf;nvC z&?t-6|1+=>chMaCTS90Y;Rb$d&<&A2wDxzEBS7n(0!&4YYqGIUnBU06T)_sG60oUL znzL6lg_h@T2g@0~%beXF=FqmfXgzqS3qQ<+HJLrtI3x~eDiPMdF?NirlRB`qx4qCW zth=(7co76rS?|bfaK8{VZj6K!t?vzNxKCQ@_ycx(wZIGOjA#vK~IlYc4Y`+^X5vXx~W&pV{W%O#v;L0mhPnl+a13eL>RSnkBb=?z$4;miG%AJjnD%q0nHZwW8_XlcQ$|U^EEHb*c`e;ZO4!Ky9@)6j%N+{49ElN!O=b; z^&#z&I8BFee-$(TL zR@~q%a^mqtJBi`Ds$`{V@UGwl_eZPbE;Wzj{ttU^0an%4H40;)f`~Md(hZ7qN;gP1 zDBazubR*p$-5}klbT@1o>D(Y48}P1;dCq&z_n-5=-+%9S?{io@YmYf*t}$!OF~^uQ z3Ga1-(bRH#!fbok**B%t35yrAtjd|P?N7;ONk%(u`|D^!i-mY*C6%L~7HL{Aj~q*i z`YMUAzgd|*iioG)H#TCG$SgJ?{ChJ4ruL^#)_=1vG~|wstx8wdp(JLk2Vn|^kjvgpnnS(E02MF_H)zdTCtIk zq&0T4m9_DrF~I&wTS{5Lj4`>ss7ZpgxKa!LO*apOb-DTBk<{yYO|lOu8SgDvi`8us zXwm?CWIKR($1oj0%O87mogG#2z!^2{d~Cb=X1p#~#;7VV-qy_#lg+>n%;TxHc>7*l7s{yAj{&&9brPM^kR9tYP1}(gcC3TtN>Z5PZ)G zP4=syHIS%Q<-chEvAPAHudFSZ2XpFg&%X}@wynoSp$jIMYx%9rq&0V@Q4f8_vU8Y@ zs@BF+7(5;1reNc0@Kp}uSQQPh>a8t|u^)zI^)U%HJeb02>w`wTzL5M0koEkSxU)u> z1BSOtsxByr`o{ZhkK7TAK({^udMlILs|CnrGQ$AKH2~i+;213kzELX1uZw6a{zE8uZ-*ILG4%O0btP{ zP*ZJv(>%FSjK6+!TvqDY1M5P8L(a=d4ySq9kG?s2<5{Z!b2JHg1~iTDhok($ru~7& z*eds6O5*Nd#~kKiN}bOvH4LWpOUh@^QapI1%ui`b$VX*=kFnifW3$uJs+F16v3wvw zysIBJyQIH}NPQCn*16%S9%B#&Yk+B!&l8F0?Ew(Uhj^WdL(X^5L~gZmegrD<`&tLU z0MoIYn(QXm^AvEC)lkVhBSgHi>_s9@l3!4Bk59y0h+-E^NBGy4E_rmu3A^-?65tf=7E;YuJ1^=dAGDbAh9?b!shl(4EM z-iuH{fkTfLz+itTS`+T!oC(~r-}lD)`=;)PV!Gtx;}rUE$b8KiEm+r%UCq`Wb(mfP zs`~`{Q4Rgq_EOxh1e`(eBlRtX@n2Mkzw-LuNZ!_1b!;%#3UlMW35hV2Sik@KJ@FLA z5(qeH67}TY!L^0p{=QlnM4>^JQI6ouC74Z)wzGEeG!PbA$?in}nP}>N%LKyqH4}$kKS0347x!t9!VZ2Y zV(C0n4-Ba2{e?mN_o{lR^n3bRA@)A3~+i>!ujsP1AW5>W{7s5ll#BnUs*CmYDO`VdosAqA5XrH^={7cc-2(1E? zb>1c`F>a=_^G?Ad$JWU866X_P1an*5c@E#ABv7Y5E>);%_^o&REp7#KPq43=-t{z5 zyfsE3Cb=q0eo_PhSiGK6yS=(%2TtWHBOd9OOsS~G7jaV4Mj-0XDa*~)aMES=Pv;ft zE#D78bwuh9*B1G8iTC3qs=vamBNAguGd+CS9mc@T9aqrs^b?ON`PJeuQ~JAur>$_5x!gqBvHNQ`V-Py*DLoLUGA<905niv zWLqfPB{OxA{jfCD9+-1Ft`lHN#~dZFN$H$VCIk%WwxJRZclL-<{%}+RN`rRD&d8bs z)$Ue+Ft3w90gZRiu~>btd!COS{Q4IK)o&xcy!t{oW%s@KWXpod(WCokUCRMQtD4Cr zP%9VQD5kpeICVN~r{Z@J!MQI7jX3OPq|`um5+tJpb_q%8^XN(8bq*E5A)_E=wG#9D zUpX9G^yx7Pt4{55TOi|rseh3+te?^CXR)z98V0#-B0F61H&>LTq<$KpYVztERloiG zXzMqWGiPm4(CXD4V&1j+06O6upx7eD1R20cz&6m-W9Bj}$O+!qo4UE~Hm=v8n%7bV| z%lDlDr+2UcHb&QhytLasNJU6Ig07F3ZoK%7B|?xIbRBz5O{jeN2zGVn`I%WMgT$8X zBKIs$TD`rg*W#O{`YE-mbN^GypshZHMy%g#v=hA=W5L z(d1`pw3~CvF+1K8b5*Zl+e#h7InAqAc6Gz+Si(Gm`xdH>;fqwt@@FKAz}e*v<`_Q{ z0&C{TN(Hvt)u)fG+1r5x!&8p7B#%|aBb9F&go&OD`l{0pKf8k8ki6t)bj^-LX3`Oqk{HqD%@& zj07%_LD1hS&S(Mdad|%OPy*a1!%cQhzIyr&fEbTQ+so%~vBRU;fNF>Xikd)C5(rsI z?4@=VGU5=XtZgg{kEGA3A>-P~cp~+oBIb_Uy`wcWjF8YW6N4AoPYbXh8;g_BTWUaa zig?4oWibNWt^UvpV^Ep!sD&w-?qiGpi#=BM+WWrP^`$g!RY;E5!n2>`{~h#WeSftp z(v$|U>(>fQ&OTbR-TU+_4+mvS~uR`w@py@u?bV!FJ$U#s*K4rxlB#g!uIHHnRN78 z;`Yk(iyrL9QYh>`g)AR*Q&H>bv7A7RID4~W#raUENEROhNLbHq!20nYR`&qJ`U0qk-uiyZQ! 
zKp)Emm+{s)&E_)?g8S9>)w@)h;$aOqb{ey@EW0H;2fHOo92HbQloXTeHB#b;s~qY}0lZM&`r&d!2T%-wFr2SEdApROq;hGHc$(+V`^I%s7HoWvtQYyF|Uml0%S9`y%%NHgwzcep7X2^CwY7Cvh%O z!$d70$LIQrLY!3$qh~_6mEMz!N{0$sx;DW68refQ9mgzE>43(u>e#L)^g;4tqb+UH z{*m%Zd?kQWJT0#Y*WW)Yw!f3jG|T)pDk_u|kt}Ci=H3 z9rgW~xDm`I89!d*cm`soc@e|BwQ$|xwWhn8lhgy07C6Y+;*djOsIFp?g=2O4m{{`D zy>66^ByRVC{i06ko=er!*_mTM2*@m~&*iX_Xjxsf%^krD^_b4&<}T06m6lh@kH`Yx z5t)c@?q1_~uH;zqh|CWOyco4-EdoGj$A|41fXB1q_eGp!es>qHyFkw(^JQ+9u1q}> z>(=~S(>P@RQi?ERA_}lHBfNNhFbOp|LuWgD4a^_hRP*}Ey;?Zu5XD9wjM_T0VJG>v zI>l|Y4Tu<;6{`tgnAQxN&S0%~SiF{4=}P)O793_HJ!9ii%0nt(T^pxe+}{VxN9{P& zI{xLd|7x%X&FFj&m{8$8Wt9JM`;v<(n)JLB0Y3k@PboLOtp+>hcGe49NbvZ2&&O<^p)PTC=f; zwwuaFtj)=rsmd}@TIMh~kmZwV5#;5)-6gamqfM-P=I&UxXGmN}Y$e4cD9TCE=Bg>Y-f7G{lV`D z_lxs~kQ~jnt!tDd;fC-8Ja>v48U~<^i=eGP1|J6;V{s)+LL|$3&p0W@jbKj}TKsbG zSlwzYFl|khPJE-N-Ar!Ar)~jQzCVUh{F_7srT{4-C1$&DX1#FQOCwL-O&3|aNy(Z` z!Q%pFno#aYvtDAeIR+D!DT{Y274c#tpLXA?DO~u9t&U;NYBqpq2c4&Ej&&8l>V*Q~ z_g@VOZVd_cGxPSDBM<{IO)5DqrRx~N>a>Sluytl&d7!YOv9#pZfY3;6H$J{0)z%bWO*HP8n2g6Am zM~pa=&c_5cLJh8pE1=>u`4?3mw4lPcw+S1L8b9SvfFRG!2`b$H#B>GdB!I;2MRkax za2PRw`n_yU08obby8HQaJtMKK9*Dthf9r70&06X6L)>@`%jVtVP;m>j2GD$y9bWLu zdX<&d!%+o5kT44zmVPC~xq0_pR`Rz^Jm8oM!*^M)evv;yS$NNQQSn}A~fPwCRvp7y7F0_;6Y1esZk;rmjWcd^|f>fjvF zNPD2iIAUxeW(@#9aSdoDBz;@ht}-dq;aD@CJ7~R9$vT(SJQyC&c8zgTM{RRBadE|R z2xqu&(q!Os3Wieon z(&C^k0nPvrBUYDulQu%lvcK)i{=w$(w@HY7178yoD>eV&_r&_){>>!Ra|5C6Jquve zMx!-$O`k2BK&KF0evSYGutfX=0L4PENgZk%=aeEqUG|Gf+z;Eh9{~BTMa0su{;w8y zKW*@+-E}@g%X2+h!0{awke(o9b=|HIU3BoM&BGALS(B;}Z;M*-&% z7igy-T#mOqTin+7L+}0c{XO(!;sHDW%@-mOKsWtP_y-t5F&G$sYM9$WG5|i}0Ols3 ztbj!F3Wh%#=7}z*^^Y*kfItlRgGSNPg}H{T1ao=-r1LHMO_R&3C>+j6OW&hF!vLw> zK!1J}^Xhe_D#mshaK2eAMefSC#cdVxR|^eugMaoQf^<232S{ni-qax@SX!l8X9S)uP6JLC?SYuS_Qd$(LK-`cQWN{u}O?s~biYNhp&)}+yBRipx{ zp8IHnedUeQ=QIA-_W9V`%^hcJKZ}Va1ZuX2fq5i_vrlU7Wq8SWBWH3z^A*g{(WQ)L z+t`_OF<7nG3bPP2x&y#^kU{*CE19VC zKyEAE#QNRE&ny7wN`3LF0yUCl2#)Y;goJ#46W}+FU&do#4lgp%iUtY{?92qPxeiF{ z0IE*c^G4)ACJXtv9e{4st_hJ`Lh*2ksT;I(vLHAn`e0b*{=b91OMpNynSUV_xCv- zs>qLx*Wonq$wlK9wknLO=e7zNJ=Yvj4Kb$Xr07(iRySpYvE=C_EO0O4rj0P z%VDP)#+8CYH$_9E=2A6dXR@%vB8f3aM&H{?t(iM&xRcQnM=3O_1}hDeU8PGh8h@6N zm0R%3%{m`u(k%@sKHq9U@hhur`~;ktPaJBuHmzDdp!r0%Ro=LVEd_H;F*3z;m6E2> z%GFb&I(w7X=lsu7{f_@SvzZTaCsnL3)8zow`Q)!uCvebPiJotw4Vcetb8*yQ2b}FQ zB0J645MJrPT4$<0xFJ!lJj0tH;IcHMpLG1$Mx3LN*jZt|qoEt2Z?OApC7~C!>c#gm z{O%XWGwl<^Qp&LXY`e~9fsz(x#ll`3&$E9+qM|Yq_o?zk9vHLqWMO-GMmA9Dz|GpS zZUt{XrqIeff*F*J$hr3Hcj^6$^QwE=U7(y!X(Y=%I^wQwm-7CXKtB+_>7GBZ{~7RW z2^3f_LbnM<|4JYF&occ@?618T(CG(yaa4%AHpI`?;g{k3H(`PCd*cE4WFx8(oC`4SZy7VId$+KN&UCmkH+jD zuid`}Wbb6m>=B;b6s3=ItXpf%W zet`ajff0l7F+BkhG1Idb{Ld{&m|v0d^6>}=3VoS`xdX(xcI$eVur4uu_(PF<89p+6 z6aXb;CTp_md^9!ZR~)!0Vx9t*4^NnZ64bHApW7pe^+zBD1j5=zQk*E zmHGnXTbv^M1?IWYc;DFIi#OvROkOZO#>PYlf^EB@eXZoxUq%Go!z;@d;0DccWnZQu zE}w%gCoUUL54jKTxK~95$?SMP!pcB2%gV%kDj)X+#t*pr>olPPIKf*q`u+M&ifrEu z8})mihXIqOfkKrKgcu^sQv+@vKQXMa#hMLclenkMKS{mLIc72GF z;De)SHIK3aaHcRNIr*m!Mrp;H{N6M}LmBo8`O`R?jZ#RP@MB6hbuH(fFAj78Bn z7MshLtzm3eyTrqc6ax-awD^n8nwdQTDrDW{!AUnGzlONvjS~WT@5Tln60; z<$y(yEOKpFVZnI5>RMP`E|wzKT%Nx`g@OHcJ=e!V64P^jErNb>C*IbPy+Of1mFOmT z#wZ_w8#2>+qnHMJ?3QQKH^BeBAmk#X%Z*)m^ZTYzGz#BjukQ|a*&ZNHkWa)`D2m6 z;W(qxm0%G^dl?C>u_&{NW^BhKTw%4~2=peM=h)ksOk-hwpPOX0%4BW^Di294Enq$J zvf-6_C`O`UQhYF8Y#DLU%QO-$G8H-An}=pK-xg6SRMF|qNLi*RCPsWlNhmQ?`ZQef zd6-wB1d@3QO%OY$qk#IdI`XD)G|xhxJohMpfpz$ZI`4_V2P<#v!cN4aJwdm4^ol8^ zp&GV6awpnWqqo~DVzwV%A3x;Q*f~0n3sTB=>Bf5pVKBl{ghdKG47ca-Ms^w%t^8eN>^kB-z77Vua#Vhu>r*+5DK-`JLNAYFiQ^& zfU!2TqdAx z)4SJXreJZoGIfSwD_2m~DXgWF*UE)dZ!_GB4AVkX8ke%IZWx1~n4fvsAzz|3;>{@W 
zUGUv!gqs&2>@gtJjf4$q2|hP1Y&3 zn^|1a@3f;xCWhbMH4b`bpth#>q&o+ENI~#~L;;?Pl7F^Mu7b1SPR}~^u8FVu&P8!! zvJ3K4-XQ{`%^IfQ(U|BUt-<${Utr>lyrQMyvUWGJEOdjiX^Pg3IOF8j+TaCbcc#sv zjPX~y^@m<-%XKB$#$;$~ePFUERHGx?pHc%=<(=Jv6yfXYEuxJiU`aBYn-tKfnAOD| z^M8RcYHfQO9unl`GPtz#`ayI|@S-6fzXGvPU2hHTd+Ty8Q1n#mjKGr&Y++WJY%(&_ zg1S62)e@%K3~3LA+5=m=Gm4a{fl%@;UZXpE@I37bjI}eGxtL4AYSo;mYQ-oS%6Ozo zmGqG&eNWc0{MvJw@K0a6_A1cr1|&oygp3JBzF}RI)KN5<AWkM-E5a+W z7edp`7P*N_oZ)eYX~jQk{!n6?&laaEr#_(cN$4gKwGe+~!W^~SWN?y@NV(2lg;$|c zCqsyoe;u-nAGawtVPs%{NZJB!K`q=<;xf#UP4$g0FcKyWsoTjRhcT$MoMj?PirNGs2UkK1^U{)LR1a^eR83>~P1z2QT@% zdn`y2Jz}<~cPf|A?=Ck=B(kT9>sss}V*t|^%?pt!@8zdq2~&4iuD&XYBJ7f2e;%An zKFF7xhkLu5S>UccO}I7j{3t6vM3dUuJ1xePd}5GyQ*lZU1lB}k+%=tIM1CWdbePoyzSw`e3?j+UZReeh~ z1KFAGk)qjxgY&`g06|>%s9AY#EHzp9r}EDJ-99dvjA9DmG+7B0CbcFdg{QZ>cAi!a zT7B9cTWM%eJkL#Ft7pFP_>&&az2?`k7EkZPzWmU`^3eA`Ksodk^v)rkuYQ5?GG~xW zO&gULEhzrH&UIdHwXPU&ag!veDy>XPA=6k^G5P3F^ANUAeAL7TF8E~kJs$I7+quJB zDE9868;5!O-$Ru<@`0ipvdqwXh8K+2bRz!yR}Bu%-;QJA7ZB!bB4ccr~@n&e(m z{!c*=A%{vNcUYFU@ysoS|4HBQKZTb6hs%+6#@|tgDgV#&4+Z`m3gBiF zr5zX!{PX-nfq#brc8%(xUtrETrx1(3z9bj z@>J206`9cyVi({~2ZLiE-9ZYX`|NT-gtz?c6?b{JrFlu_!(I!o%{4deS_!vJMQ`n@ zqb+$l?9s;<5-8G}#?y95l>3E+39@3_xfJnrq7N?XKJI(gT`iGks3yG$TQuY_BNoC+ zf~*vnh4KYvJjt4PBEBaRTX?kf!^gZF+Jwki3Hlc{D{*<*4|qd0fD^Z{%SSi}VNBeb&cK!x zXylc)n8n-USmnu$P`HTU=6qUaXQ7#FXG_xR6)YNbid&u}ty3POLsQmbmY@n@)x8$X zJ5)t&ipn0@;i)K6FF?aoDIi7Mr?M04Wphi9(@oB z@4G6!j?=mc!IF=W35GD>jeD{w06iY0BX{ts_V=7|Lyn=OmVz)QfpOa*)Mo4Y)AUS) zOd6}Y_l{)ft>)gC<65@$3-!ovc^ZspZ?jCuqeU479`6-u-vF%%F$rj~2n?4irQ4}V zmsXCzd5g}H%->7fA1+LdonV&MixZxfE+KnKJSI&*!q$W%AzT?=hugs8K}~B=!x$4~ zG_mBBFPz5V6y&T+rbMWs9)@N;sR>pPOvQ9)#Mkgi%~=8a8I;~fv5DABre$inWv1xU_vrDJpL4fqZtZmf6Vn<_WW%#ZOA* zBaG2&4brf<;vzjn^k5 zus|g$In1P|_oPrOO`1JHl66WDf#6}4(d>I~YGAdN+434!qGymL)}g&O{z+9N8~kL? 
zu#M-KKVz)~r0esMbHvf+5|ZVE^poKyRZ_B3b{WRHN_X4kWZcvzr4lH+1Di_d_lwLH zhJDhJhYCz1a(Yk;)X1!R<#%!aCVZq7mVabFh;7#p`ba&f!29e%Q z>oSQ{Gb8EFq~>zvNF5KqDW(H0incH|{uvUTi0&eRS`h)2*6f?1Vd6XDi!ui5=rWtN zP}~h4(&cW#;Ko<(tjHv=}%@G5vUFtK|uw#>*zQWDi7;Few zHkT|@9^uiKoNuV*3x^%15kV%IEE zh(wjPO{NMtguiBvFfp=(1WT*Y^m_TOqA6ylp4~YSN4}v~-n$N=NA;2U^w_OcFDdPY zvYZ0wWilcSk*C2#nK_$CB71qWSTs6swK7AICt>rcESMeam~VjKrj2J*Wu#|Xtcmd9 zWq3-W6dv9g+w2r$AgGr+r7Y?LImsi@p7Xd1eoF{nbXWhO5od?Y{p(l|!$3(Q>I zi$maUQTK_09fNJ|32l1`Aq9meK_esB207~w6QV&{^9`MBtxceU9__V!56RM)*knJd zO{)(9RDGUiw)-?9a;XZ_8tNMeI35=eVGfee6=(M<^voxc&W>W2Js%4uV z*OX)FkR{^s;YAcGBRR@P)NI2Zfa@H*mkai$z!@XJZA`6A7_J$u&B~kC0)2#ERG;`F zBBvIe*oD$4wNS^7F?tW%!r)~mTXqVp@eNVSRSmad>hT5QH8Wv3N@VgZzgezikYtBdpHQ zPB)Y3Jg#ecwm=J~4%R8{9L|8lif22+ zgMEq~qAxJ>_s2Qi2U3pAj?jm8;*{pR`6}{eDsq`JKO!EU^GYF2TT7e`l51aUbq92q%wr zOP;PZag0X?QKz}QYBXG1N22fvhNYN=updNux*3~Sd1Cs=rZ%uFFMw(i-{-+@?__f1 z;fGqU23l5TWagW%<;r5}QSBN!$}rHQ>M-sq$5vBsKXlErF%^sI>Kvz1DaI9;kjd*& z1>scP^wlFWdt9HW?y3Gpp)M+N>ulz%J*}9qg5-`< zskuThYrgb8Vxr69Dn83;dmoqOe&|1r{{|Ywn&K7%%Tj=)ZQ+z<;2$=uJVtAcnz?Fa z@qMMZDHB!f=+DO@4l;_JZ%=2uKVzfyH`Y+h$YaK`_h(osjvdvd8IM-b&e;?p&wEn8 zC>K@r)*Q2|c_Nb|aVCm-?A|WDftX#u#pIKuboJG=m|N|TmDOOK$vqjaZUN=Jftd0G zk1$JfLnGmKrR}7;DxkMbVsL+f;RKe?bb9U;#loW`G~;4zPd%v^G7dAfF2~BMa!CxU zy#`9BE3NG(zQEMfiHo;aq-G?D(8s%Em>7rN?S8BmF_&x42(TNQLikk2 z`aFQ(r87b0UpB9ZFOem>rWJ=cq|t(Vc6J33YBEXir67`k#% zF-?6-)T@aogn0E23l29(fz*0)Z`f|yHEe!?$rzzH*hn=nKjZuwVVx1qDz5~IQY@?C z&OC`^-uhcKn}BO1!y4gH#VzkohlcbBgJzL7)ccphL({0xY)&`F!1f^EK-?VJsI6(2 z)^=>-Hnt6^FEF>uog;vKxPv+u&M@-^t-gauKo$brh*5l}mpm)W>{Ubrv6Fb(XfzuY zoq4T@Oi>D>(@YUmUIG+g1Fx-mzUZW;Y9_TkNx1tK32~IoJeAb}b(zTK_NJz%h8HI) z+4(Xrdq#vnjB3q@=>2J#1afeixwKwS(DRjI)rvXI!>(FA*^6G@#EQ&&mvG~QlP)rs!iqu_Jex0I*~|Xvy zeNX+B8^90Nr-Kp zIz!Z^$9ExT(-+&CmoX+4BC3rquM26WUl*K?^K!&`IkCu4&y5qOZN7=%X{)Aj37%w5 zYe>l2o(H3qihSq`562KP6d(9JB_(o*(^Fd}xC~Yl7=?VnXxx{n zlPTcFfs63Vi-cQS^}~-RweawYs6?NapWS{dnNKb;>a)>o`?B`kjFMRavjTQL!as=r zJLmzoiSBjkYuq@07@kZR1Me@y-^fbmh=F8bDj{2WNn=>)&)cWcpL>gAd#$E0n+_ib zq!VZ6w~CBDjZK2}p{-@wu29=_Jp-^|f)$RaB9<7mDydJ^5EGbw-X2#4mZXi)aFW#@ zD&&TpK|J$FV#wXIYuLu;ZB!_%joj5rjZ&C5X>Y0u>S<;b3#ZtJWhPQpVQ!c0xpxMM z^dm`S*9 z-YjwXF7?mGMmtd+2W3PYqj@$aqPBzYpNY5IbIr5}v@()sE=Bd@)$4dqS?-?L%QEsc(f*t(LGZ@2`qvMJAxH4o+43~eA9$B66lKz>U#s}$y~ zb-c^kxFlB9t@R{{Rn+}h7Kim-83=9JfZ9VNZgQBt)5erSh6ghxQV^nrI?`BXPx=}< zY;sM4Y86T@W<6D$o$z!Kv5CP!k#ejty{Yor6YD-!gB70X-ZWz$8HE_x;|ROJ07cVV zv6)B_FIJ4e!~#vbEiQ=uwB*Jr%kn)8JVjJd4b^)Z7BeYAbqfzVMCxiD5ii9}yMr@G zbV$i!MMU_eRIEax9IsIyDexB*>Gv*VjV6_|vtJ|u!t@F|%1hmg+4`vF+suR;R-?Ao zebol_p1BEvWqIAzYWH8@LL6w*PoS}szJpRz-oCVeVq+8O3| zqoBNQw2-3b0jYQfi1g#gHaxGEDlXHNU_NyHBzC21@P+_*bY(Y80VXlBSIp znoS@XOohHI$cy+GeV%=LYiKlZ{_&f+3v~A{FmLT5z{k_hZVU@>UA!@}9(apUytfk1 zeSta$!{^UT#FWz^po%pSl8%{jI*uCTdu$3;mb_FghSzE#GHviFH`2VRAXi178_XIyT zOVGic@_I^ly8(g7Ajp~BjnShF!HVwax%6(AuqthGy}mJ<(R#VFo)M=p+r-{Srq1i(p}Pp&m~4H zdz)V*I_o4sxmR<*3r!A1ohka<_?AXHW4QR6C!JXaN9|7AoMmSX6*Fu$P2)Dbi{wXx zoed$zgbA2gmfqHQ6AQ%;2_E^2@uEi`=;D&052C76h=e1iVu~c)i8hV7%*&RbO)+j< zVDS4t|4>LI9zutly)(0A)h9OE&%Jq?%)-w~kqgsVD}9j*;~T!fm{zT~i&jPvhJ*>2 z&kJ=`MqcK8{er~{N@&$4rLBDC*CEz6ZPi~Pn3j~PH76XKk0NQu$v;F(iKoUUnnvD* z7wV-h{Lr^-vBqAVO^B@yo4QTaFgP3(7pVav>P|L7TJSxPH*(2PTvj*MCYg74Duq*|vY?*@lK0H=|7jA+5Aa2?^Ja{2nXN9H-JY&oGXip|a zY`C>+`q0?4Lqu$_2X!-B1~=u5W#=NWB&wC2)?qWv$dbi5FC-1YvM(xdy&cw|%SoVo z!i>Uq=xAhPFAI<&6w)&9_ea=_eG2PP*kLtUjgj%HTqf2@8!Ax+7+Lb0J@og* zP0G=gzRda!?P4bNWG0g1^D2W@Z@dk+Qey8a1!vYh(rE38k+ zWP43Z>hsg7;n6Ajl*@)Wd*_+eZTqT%@DQEJZ7);QfSkeyD+rnsxT(%B-KO?9m8s19 zRPx`&*vw*mcDTR}-@uqlT(ah;gLh0MA%<{XL`E>GjWZ{KaLJ6+ebNvzr%AWhtAPv3 
zfU%psJl&bcX*oSGeAp~v(0qC;R+ULD-J$zg#ccMy5Pd6^EkepilLZYQEm--F)zf_! zv8@Edm<~&Dl{!=ryV^D?Ulb(rG0;S`PTwZFIWVu=0Ct~_25rBY`T~=tW(k(51CKpd zGop!4rVKU64%k+th+nOI>|*&M#tm;I+W7i#S!>Qb-GDaSy))raSjQ^ReiM>9D8Yw|I-2K~Rj9zjd`kp%fELA7 zv_ddL9(61|U!bn)$zuwByr6_A#m}2Hb+QHl#Gm>ujfpV1m*MN$mP6=vYK@(_w$b4;wyJ}BY`@2H!YrkK~}UvXEra-AA>;U(KCKo@%{yAH(w8I*sUQGd1=CBfm(@f zeh~o%5dn0Qhw z1m|s^=|aCro4p3L_Bv^(>2M#jPZdUeS1^uPr77+r8yaRVoM`(OnBreJ9@q9Y%nfpw=TfUm3aZUdcxC&1b%JVI{85r#f37!w0rtb@_c#p{=UmARKcu?m?f5+d? zGt9D;l<@Pm4-fqe_-|(TmcX8T_$Ib_Z0_1moq?pz<>i#|3!GfaqKRoif27h=A z=tfaaP;TfImS9lkK}(6MEJJacst=bbkCP|c-%ADCMYAz<4^3IFXIVF)(xZAb-XuIA z5Bh27%5pxVypm{gbHGGVnS1Qr6s>}x;$(&qq@Z<|!UK6!x@rHhUr(x- zyuMJ3zoZ7s7{vQ+m~JN4?9@Qj+!X5o-Nfo+OVr+|DjHy4~AQF~- ziZ92O5FLM_y ztByveTOR~doz8Thy*YXxJIJ|l6j0RmCSy3rcGOONB^oh;$i9AGZOZ>CZA<$b4W4{& z97V_biWm148x!9fIG*HE5GIOYC>Er97jEY4tz{1{sb-#!LOF5uWBeuB8DtnmZm@4zadBag5gh9?CP8N@@N^lxGeS*X9{+7F;(`;mt3^j44=hR}UH|%bUXT zyn0>>9`v-m{r>39Mv11pu_(==dUXt578*?e^B}zRhH_Nk0q1x=mA_ zNH`Wi26cP#C@|cckjZ^90DTDO1dBOpq#>6zoX3%kRWaw6ij?c`m+jR@&)-a6P%@4e zTU}<*ofg5VPikh3M22Q<>UW_L;i-p!;~vdK20W!Ug4C=g47(xf#qniqUrJlc;C)I# zuFMZ~c{7~bBeeH^&8p#G8N&MoCNRGb9k2TYb*O7Z|lV@^jMpBBBC)UdhNWRTRN;K>MgoRzl z!!cmka}9E`)}+eQ8eyvOsF@Cyv- zMIqB5cc+RBa6xQ>BAmLVe2g@3?F6ssSeLM*FRLm+js^UWgf9dBzOoJc>K`cV)JUL^`QObz# zp-~N@^{|M(x;WhN21tQxL4ENN?>C}(Iaa{y>dHC{Y{i(pPAo{c>6iR9pg@|kBo+z3 z%2P2RXgsT4Y8&OubZF*`22l#l{ItE)(GtkH7C@H1Hw{KvsXnIOo8_`H3u5Osd1DUO zSxDJ>xVfD=4m+GHa*De#!u$e z*xRrGu*m!sk8ljLs^090>(UZZD{22dJZ_S8Co|iMrt1es`Qs_dg`Ayg_@p-8+v-9H zeGQ5u76{bo%Ep2Sf{|ACSP9$L^3s^cAg^`^%LVM=kfr6<5}c|DRj;xE@ng(SQ<)vJL4T#82m=PE~ z>((mPj@b^@Y@FlVNa4=f2<-1xN{XJ%v7y&zY=l+rlc$xg?Q!fa3+~&1&vL zK(Z1{hFcUD)_G=BqTp<&s42p(6mnOouqbzGE~bs3?h;aW*I}DX%-{qw=}x6nwhjN3 z&PH#;NfBX^Hxv_`@$s>%>mQZRZr^^ljTzwr$>1 zV|=2!*4ZrO-fOLa)e~7|!JZS^QF@3Sr;jzO;(hn1vO zyis|6KCDYbx7mdvS^OG&h5Za)p&C#&UmC(u z$sU|jCwxgc=X1ZPI<#PKL*&@v2-;Ahep)%VFEB+?uFG_V%fKmC4Xw5cnal?6YFl-k zOiX*6qeWfV1!D5dRCY&|*jI)nvn*gK z^Ux~np>R-!pbQZOQlwuOzb+yDUqS?W@D{0gjnI>6ofO4yJ~00AJYQxe9%?g1oPb!7 z7^~oCRr43TCQL7gzRk<(<+%p&fL)QaX-LAQd|b7AvfkY&D2}2fDnzRI>b*H5RKgdE zGP2}6ykrwyMWhzNlrFw1R4qWCAxky6o$vlH#Xfy%s#rEoznkH>x^WYe;8rPb77fVuV6c&v5Gb$ zVE&~_&!$mUU)GL8nE*^%#h6wV19*+CAPxZ4Sy7V1gM3sRcovt+^lJElz z@CtM>vTJ-IOTvR$(v`*QNx<{wZO8v&?>nQSTAqD9M~Nx|lEWZ52}sT&ISx6zO40#xkoaHDvXOJ9+90bV-zC9rSbG-L`dmrvw>%CR8V0TYw7Kf*0Y5r}@B6QzGxXq_Hn%EU!Lij75DgikZr5?V3BCDoyNorkVW zFw~*Sf&}wf7f3MF1-k8hNvXv~(=U)3aPVS9v8P#3%8MY#XAR_c0(fAs6SPZzNfyLL zn=Z)C9&%I~GnD!6ObSXB3TDrtT)vWnU87;bk8O1=;9lVEYHXVuaX?B#^KjJw@+edu zIjy89KPFP3C~R{$4Sy{s-9?- z|6|j~uE|VaHk^qz)Xy_t-?bX4Z~4j}VZJ>>yKw&0!T`5Ff3r|nlKo2=-#Pngx=%@+ zP1EgteYbO6DJI=+nNpo{#`)aL^=XvacZr|OkMuNovrxtylm|`Jg$}M z8QmMAmByy}k4A|)o`hKwC3AvRedrK1ovtM9d|}et^^`Ol1@ScR_UOFK5puncexJAA zk1#0~RXW=F$u@Hl&#bz3Y5&9l)AxsA!{eHh@z6GyHHY|2V z2V2oHFYYB%h>CI_XSNV?v2ofk%*s%mcOlBO$VFT2wFN^~p8>fB)0az8?Z_Yx6KNDz zVHexl|A>7+N$wD#BwpKlDjr}Qx&_8SL2ud?Hzf(kq}Sja!nw8l;!SHL`5TdSEccIaKS0Ohor$uIMy*TN3&V#BuR@g`S(Yue&{M|Z=OPx& zgcS1;=fS%TIfo5RtAQ#t$wmpUwOBI$xNvYVSPoisMDMU6+^@}HrI_t__jlxu@3ZCe z7S(#V=z47)Mg4io**gO@zgQc}kG^s^sPXC7PDPt8dx3N87*3_O{nuxQ#-KeG{AQ^< zTXSiCm|G{0l7I5|;eTz>^pB~Iin~gq+LK7W(}GM*dcr)TBe^(NWJEgtl@%38z;cD* zoV6KnkwPTvY=-=D-k!$gYj@sh}&^O~mR+b2nuHu^Sc=8F*I>VRAq3uP| zaTtd)qb_NG?pyLJ`|T~tL%vN@H+hHHO5-!Du74hK^yrS;J8A||-JQ-T0@Mq6{&^eP zUGdvGE^s#K+VqP^FQt*(RqxApzSbWFoFLK0_E8zHyt_9If7@fEUz@3^v@|q>dHveV z1*Q5biz<|Kr~0NGcXai}w!CGOOsBeFHa3}Xa7D|LuxD)=1WQ-6uw-IqoQfg=8)$I= zI#wmGfpk2sJTND=pl;lC-!RU!*RBkpxpZ@WN={{+xJ&eQIhY=tLp&95%0d~5H&gNRRlG; 
z*7&;#oLemp=(K-vQv4Cbb62PiGGm@ZJ_S}%&_hHNVULg|*!C+*Kzv>E@LO^bxpVl^u)ms6vV5!{wPGgE(bu{-KZPM%*5ch15#f4b zcl_?x^KBZL0$k^&xkpgKM#GrbJf@Fd%5*XD6zJeIq1`TC;9+bH4$vEIwnoCip9L&3WJ)3=U*@vX4 zPn}{Ef8orV-_07&!aKCQWIxZL(+D-g0u`ON2vP4Y`YoM}&q|+r!%;Q{BL<^zLr*8z z@dfmgQl+6hU)d-6XCCo?ENWr|tAgmCsRO6ITk;>^e-KglYmenNaES|x*;tmEucmbw zwsm-<#`G`0*VdP@I{)rnfOQz+c(rX^PuZifaED(oyuEdVD^ zup5?~-xOynN*d4!II<^qR@Xqb_Q;_?Tkkt(N>tY1GC2x`gs3AWEw1jwBmP#}mq3b| znkY3I{UFtU-6ncXly*k1MsA`?WxwNP{FM#1%m`r?yCl|1pw%-m3llYa8M1fYQOQ-vv2iVA04SJ#^wQsIZ@)S?m15T zL(TyKb)S^#M7#X%m>e}+ZNv`P?h9s#|DtD_!72Yqh-WJ;(n(X+Dg{bs-O1ue&i(LO zJ8X6!r`|4MFkCl!(KJi$kpewX{8^z?_xqBdV$;d&XdXEf5z8eB(UDydx?EC(Izw*o zpZPHbU(ML4)}q*fLP_YsH*~utxaN??zD(}5)L%V=qLu+J`0_E~*t5x&>xE!^jKD|W zI#b$_PHF65ewQoecydX0LkvQeu_{upujrL&aNf0vQ16)P3ar0be@hlC{n4hpViSF$ z(3@lL9;A+ysgsLI^uuQJ5J;0D95G_iE}z225+n>9%zBdBor`~{bh+oE_${cOjFuDz ztin>PCk;Dd2+u+L!5!3|%;|#2fH7VFA5&PD*Oo_D3iM=-#cT$Qd}=+9v8$pIq zNs!y+Zh-H@j0h&q8jtqZ=*R=6OF|-lcDVsL04$s3h+rSW7v#I%&bZ>s05W zrXrA6v9XI{>a;E-=(fiVip0;s*<%}1d6>hke#awSlHY2;Jew}nBWoGlg@xDMQab37 z5CN4a9)|Y-Y@Y4n_qc4cWKC>91OdK*hB>9NE%^y3VaN>#n?a70p@^b6ko?tbvI1%F zVds^)R2*Knkm*LVwCX_RlVcZLJQad__>xg8_!)AalsDO4d=dQsL~k$ds6gZf77nJv z-(w$Kl*S0w0ns@Tq`SDXI@U4SmFm+@R~NdD?!5kyGOsejX7aF#o-j@kn2hrl_p0Zc ze5zqq1FCVV^()VK7&eS0^Np$-tMh-`?IF$+udKQLyl_g?tY8@zyxgEFP7g zCY0vAq?-k^FM97X%(Z-~jz9E&u3#4}mNws*{cUNhW|oSW126S5A@>_wRYV!llq6BY z5)0PY;J3XhQ%->1@bM%VLflhbeWY~l|* ziy_Z3vvKS^>6oCX1(sHN9Yvi1*m{!ZalxkN^Me5Y(7tv1zdL+(yi@${M*h52IE<7v zvpdg?o0#4l&K$(_W3P|g)L!RWKh^=X^xEUEiSsEaJ^j)w5tzXo`m|_2q*xbgCm-MM z2J^oBd5h1N#nRwQ@nDX^f)Lo8h8vGGD6B=Ie+U*(FU!q|_dcjc3YB|TNN&HwV+!n|$Pa088q(HZ%OVq2RGX@B9QKGvzv1wBo8{M(Y*^fR zU*YGiD<7@B#VgZdUmcql$EOuTSlz09ElVhrCqit#vq?N}zQilN8rXR9|Ij~H(%Anx z+{p2HUQB&4df_9HlpTSJGipBO-8`lmD;PF8y6)yRj>#+SK1r&pK+PuBVfpQz%{qbI z-td=c4rtRKDIv4bNo3M@=mlnzWUfe}C3DnTBCYLLQYJ~_4l|M>E=M|xK1-(|`nfFR z+er93sKjO(*Gj&`Krn;BRLG(|b}D5>L)Wr6PgkK?_b}598E`Ep(d;bdA{%IGI1ai% z3!nGwVpa>_n}LZ-HQ;T8W4lpEk-ju8ve zI==W0?Mad+U-@1{U)xl;TMA(cPc7M$Od&z>Oa>e;`7EQ5>WUt4xr=FvX@W-kHas(t8RotCZKix;~MaBYD;S%`1!L*JmN36I8sS#pk+njE@>h-BS-d`Q-ggwhlSD-wZMr+Mz7x*Qj zex&rrAA^HjPdMf{dfAqZssWWz@;57#{q(0Wf(=f5V&0)yiTfk9S#0q%3~JCzU)eED zRza48Dd8j!-J{|xe4mTyJLEwHZ^7mkajxbKCLGP#N=3*VOK!CyK8o0^sr_yRiLV*e z1nR*IZ>;iQZ0k6z_vKp=iE4p9apZ#U*tQb3O;g*;gbD$t1~rscTnM#g)iZ0RmP43h zO|4=xXla1CvXZthT46BtFv;s}%Jczo=bVi2$ZZ`nvP7uadFq+l#RJC14X;^kQQC_z zpZfHRTwYA=R>XuWC0Bkj4=>#5G!%(+UmzJ0;%G!!YFe?P|^fg&kTjdvdAC9HwASLAI$|UA}tzX9&kTL96O1u8&E~_cGC`;9EqH-BWIXT zG7^loIHsoUD}(;4W-wSTYP)@IY?ch4Vx3i+7PLI1trs@gCW4JDAx=mclrs#_VMq4G zCueP)?dja*tCycClHxzIP0(=INOGWCp0no^frHPTzI&PPf!T<|CnraE=vuNA4IfN& z4;jw9^>E{p3W?HsiJ7%2?Vo>T)crMIpMCC^5gtX`EM;){k)`n+)uCu&7-?5M3LqYJ zzWshvlX~bv1HlW95{IGv7Un&BOl_5WXd{4zgX+*G=?ukM;1os>hjkmzUSy^z7K7mi zU~WHtxaR@7+WXl`1dydVUqiIc13MIoJeIYZG!awGQe)S+>0DMMfo6`obN7qZq=#yl zW5F_5`uOS04M$sm!U=t930DihE5A_rYAL_4P<{B!-|n zDJTU*QL~;}=MAJ$beG~E&EkxiPr6ZCnjb4JklKo3q_8N&?qMdA`d}!O_A_k&TZ~N- z?PPA@_K;JW26Iph^M~Ze12)k1%EDH=n3n_(ZIThu`t!ZGYMSJ9BY~#V@$v*yd2(ZM zIv3!7aE~3mJr|-M>Jf*pX547TQgCzlgq_!?4k-L^yhl&~zp~KdGtvHD35Ktgbvx;+ zBR?2ZN^CnPA3N#}cVGP^n_0tI42DBbxxQH1%7$zBO#b0K<$N&;7|eF;Pih`qS&u4p zVUYMb*Q75}FKM1cWARAsD0L-D4FUF_E!rhJ$vYpJ)+8AXi)|T5@09cU71?s{Q-vnJV1X2^Q=r=(Qy=1CIg9Q%J z#8QrOp==lk2cTdWoeVzpl9Z+5VCh4%dozTKH<>ixEPA`qWFZb+m;^&FSu7Ubn;?%- zHhrKxE$aGeVJ;K2c{h@=%VmOpp`v3K-Pa!D;Z5QX zAy9kOYZB$ZruiYU4N`8kTQD1x7U5kEEZtLE(PFG)hYoCCZ( z2Fb~6a4_Wt&H$CI0&}Lx;OvJ+^Or9_=?+^hCP*~eJxl3CzT`GlUzEDECeiZL4~YIe3f>$?123%Tn((~AEP&1 zwI;ewJ+$L^k`dLGi7j8)rEhg`9>30c4*JcoXNbJ=Kc#8~Kr1wAlCSSHu&CceeUW=f 
z2P#*7gZhs-C{&O7Pg=>~hA+M;b+kaoG9l{zc(`3W+3Nvmp2A>^-jf&-c{I|zr0bff z6uQ!Iq4Ff7RFr9Ye5Xh%@FFePdX50A4CYHZ)Ylj`u|syfLa?9=d=&4iEP$hQj28zP zqtu{jdIG-q9YtY}v}Dis@)-AGI^IWWkHklE>+;2$CbR*(pUdCoRtU^^UGzq-;w8cC z4U>CxuQk`={Q&2l+QBMX4*~A@=asBPW`~(#U50*Wmi%go*v{c}=9A*pSvb3-LBcO9 z*n~%Nhw$r*AsQ>oI%7^H8xk48*mT}fw|DnKsrjH>L0p}0w@Z@C@9K{q@_^MK&e8p&xC&V*h}T#I+Tt-9v(SDRv$H{aWSr$8 z|60FL>zndoA^9rbWtH-4^Yd|T;(Qp@W$Pm7qH(8=O^EJ?SuG`i195yBx$In>7Gf+v z5)SL%l>Ah_A}?3@0_n|S`i`ZbY%Q?qPO;TfN-V2qG?VnJQz&5QE=$K|DpD>yqsoh- zK|C5ymmll+;hc9iK_-@Mo-X`7@z}dJWP#kC0vL`vxwQH`+FwJnA^P#yJ|()t->&;p z+9$6m$XsKvtE!eE`d+@P50 zL_f4+l--^E8(IN4lw}G)V`xwKY70G74*vz*)F#eMVvp*!!X9Uv3|-;}EWOW`&~7Z# z=`oouu<4w8U?s=sb~dQCS8mnYPCK-7W&3=3Zk{$T>E<)+8039)Try}jB)J#AjPj{! zL_7C>=BW`>}R}Dwhf2#W$Pt~;;v>@vO4J>(V)?#AXz91QJHsG|I5XV`#{>8CpUR7kIOnTb2 zYa@k@T*KHW1f^rEI2i?Gj?4bMwcT+b&j>f{Blz)2KC&uZ@j%z*)u*|=^W6eyZeaaG zxuCv*;ji_Q8@xM?gTNm6H}Ls9V%zV{&s*jG=%YtsS3PvrXU7}s!c=fyE1m_51~7TZ zTkL8E#V%SNOQ8&wpMT!^lXD+9W}*F~@db399BLBs8WfzXPPSKbY?;jM5SSP#v}2e? z&}AO8w+V@HRMGHjzaC`g{S_EqHjk2Dw7hGw_sR&d@oUQn%cBMMAJVI;)PMuD@9VRH z^NtM5Iz;;8G`RCRi2*)^5Y3mcLGvpW;=2$ur9}s-=k?T3opYIGGI}M+B`&tjI*-`# ziE0`DBRCCNjt!*eMbwKZmBip_=C|%EYY@c{9+3LF8?7NU79ds3drT@#3#{&s>X z3{Fp`uJemifL11x4L%x4EI5Tr^F19l7yUyxm+?H%9wB*j+wBDO8iprw94ltZPYG*@ zgFip|{K%mrP8vp|#b^^OvtG)Oej~Lg7zinQ?W$#nJYc*?iOMPHU8rT_bF|AQ_?Dfo zeSd@9hnYoyyJr|O6qps9^l%^YseNwwzOzhRui=N2kZH$`{64FcL$U_x_1lSS+}zbc zrLwJfuZp_)D~jqBc)z3hy_!Brf5H`KP5WaK{CaWw@4kd{OH}Jmg;8 zy9l^C^6FcyQ8zAE2)eR$MxxpksUk-m2VD(_ao}3Bor@J8<)HWKUe^_y8%VbE7T~)Z zh}4DD7k38X_f%CES3YNUYvSoCr{_#I8xe6eNOC3pMfV>R7=3s?7WjObCD5+3{>efX zPFD=wv$U4ufxXX=%D+-<`ivIuS)RNmWA>9Z{UIKX&+oL~-8O+P@UkxXzMpGTKhh!n zd`Anlq~_HceSWc&WyCJqSijc4sQC~>T@8;AsDyW4OQr zkWBKJ2?bknLp<{ctRsvYv-=qZKX?*zQcW(g8PB8{&p2V)&gd;ovp`$v0rs}Iw%OOj z(;s<<(}h>FpmVee{h1Wf^SJRa_%fq{9heY9qm3B+9b5-B27)`)CEF*|WF4wqC&4G| z{iAb%JFY;~#os1Dgm)q;zJ}cUGkHPdiWCeTA$fY>)bg2Y1FgB^W7A=4v;rz>#`9OZ*;S(9dl8?% zFsYRb>*s58iVA6*#{b82KuKh6PT8F%Q}1Of2B_%pf>yysdR%7BHQaVm&g66Tn*LE5 zWvKBc+n;N6hZuV*Y>Yv3;c*UQ$Z!u#rvl|8QP$71k6k1B$sX43PIV-9HCW_dEkxsc z-rfZ|-weyPGLfa!VQ!h!mGHx>i|3trtHM-tTfw9Exk-u= zaa)6GUEz3I&I#{W%{wckwV56-HU)i~wzMFbeA?9W&xZBouJXdqTLZQ0;5Zyn@y#-?qwqbLb+xqBhijRhK301xKeX897B<6$3 z(5JD&-K1)mOd2pw6@C=c3Qb2e3Q^*pb!1$4Pt{yj2m(M`2(X8JCl(s9dNVYCIY%>J zxU-G-8`UA*BrPPt>TziN;u?So`%?( z7^f&W*RD!eOaJD6SGi*Z1o3Km4LzqiI|F88!0jFI?9Qd~)EilJf9Et~q64M-fqk3{ zoIh9J_uo%zlK7<5Fe(+A#|lJ>0NfTz`(P_cU3mNis^9tGf?lfS+FsGsK+tGGiNYB& z$I#tIV2~^!4f)|6C_{FX>)W~Vwx&D)^|}kFpshYhE)p4`eZOuIf{28~KXeQOuYvF|xCs_EorhxR;Fc#u3NsG{eo4R&50`Ps zE+kF>N;OxtXMv}vq^C@la=)H={3?N6CHnB%We4>@r{{k4pPw9=z_|Q`a^!3+SyD2S zGM43fzY*ptBLt&V-9zK&9DVp!4W{@-kb!2RN5dFdDWL zu8t(7>L?+v<0$aOpqW=gT5OmfD>tS&ajdL3TSUiYujLQ*xx|p1Xi(KBfG@zuBOs;b zM#?rCQ;`=o)TEFTs2}Dk8AsSIjpkyH);AMgPX?3o0ow9mjX5bIU)a^1lqN?pA|LMR z#w`wh>?)0`UV^IhE{M4t#kE}%#pJ%?mCM0D=P|B!Al_4#=p`|}hO(e6m64;M^sgs@ zye7hhtgAE22cq9U%`LU*aZYoQ$LI{Qp6N~1?#knY{$}s9p8oU-0ALrpq@_jfsm&Wl z-X`!Mf&JYILTCEMDf=qqzh_ElOGpbb6W(bWN1cT_*%MWkZRGT;9SW~VUVH|`3Dw26 zeQv@)l*0Ud70=ONoIqWtnM4vOPb>MwmQ(;m7DKg}He_7xHB@ zR84y~rGD4@XkYaFtE(We2k|1tnII1e%)AT0+xL&kz7qll3{1e`ApOd!1nh~KeeF5^ zHI@1Lvq4WEmUJ`~#BI(M?0$xHL7yFyZ~we?DjfF%gyCzk;dow>pfRWqylyHfZ%Le? 
zl)SRU5*!1!di$NT^B)%jd$YFDTi7pnO_oi(wqY`ICJUYqIZckT5!9~S*t1+_nZmM) zP=gP$6pWry1rqv$Pz?2+_%y^eNzy0FcLd6q}s&A3(q1V@n@G%$x%(1N1>>=KIe`j_{#X8lk~#Wi-eP+vn< z;n2ItF$F09XoEZzwDwdT52!lOINA$|7s)?1OqMW;_J!EKo;&*0PrrreR$B?mlITG}?iTW#5Os2L`H0 zRm3QE3t*$K%!k{E-5&yg@_oP@I4a+Gm<(OWrbTK~Cde&cW zT8l@!m(Yu};cJ4W+3Xc&k)~nCh}p4^7ZXw(t_2HXEF^8|X;I&-irr!S^xryfEX6X= zjr>QSw5Vydy07SS!~VG0;2k5_rRe1foM~WF*O~o+&GuBSFbFXO~ z$UQI*_`5*O?VhIa>cN1>f4NfU?KE8-i~k|4xElG_k6Ix>dB(ek)Z^A`dHlz!Nn9Rr zFohwVU~HJ6M}=H=g}imUiKU3d-yVT~DZ57Np9KE3clVD)qT4NxZwIfiJPK>E!e1(d zMYczXfdqbdh)j{yF6}MdB$xIUilzvLuX1oiDFhB2ocUUizZVd!qIYaxYNv{%@Dp)7 z`PQ&`J1g${pE*||FoK>x!2O7xW~TF$`UR>H0YH(x_N8XsHs(B_QqXf%pj~@+Z>_k- z#ND*(y3##LhW*sOkkUamVw+w)bH>0YPOx|UVm;J;@?S2^=2>LP6|`#FmZEEx?F7jq zWR{{O1bU{O_$k8MX&pOI7v?+H`KSUIv|%$h`Qh!<>H^*WDoRWn_Mb|DnDg&ujJU5k zgttFJIwn5WI?kY*-zp-5=b>2PU@u9vIEDohq&70XBuFx|?-Bq@j=IED%N32b97OQ^ zs|CUM0xcQhW^8btO;)s;W&OMCeSBZc3v`8P!(2L6Rc?d;%^AL4RtnCGGyQW=D)fmB z<%wI>xvN_^4dzPF;mp8vD-|+K{g=>T2^;wJkoJ~Or0Dr)dMq!tUw5Zc- zI#+tjvKV3+1yBCBQxkEQ_3=i!T}=f@v96!DVt-6H7_nOKvtO<@3a`(`fj@P>YsNs% z%J*g)tMKHu8XX9l`qCOhE4TK*qk$`a)DHM5*}pZZXxuab(1PFl6?vpY@i?PLt~Kbh zR%Sf07jZQ=O(lh6j-$JLaXpLfWkz21{c}p>|ET4jCr{bjhCiiO=|9}IJ@W>ljvY_& z*|E90i>zf1njHg&1UtN*U#7v&tV=p%b%JL`_J3Ft^+CSg|QgO^l@L>ClA_Rq*0lQawmZv4a+9L_;@Ym}^IhbDA zs{VCM?LP`{*Xp$A-A%I}vFG$+Dt_(?tWxR~gYZtARRf~K3+_j#Q++^m%=U|5zr+eG z_vdN}8U|ype$#fVcZ_t^p^Ua|MT7EkIfpE8jlM*}r!&1MGM_o9L?f8m2|IBStAH>Zy zAiN-P_&+6`%#N-FT?xMrg^q7(_PIs#*8FM?igl(WNBqCqV9$~GuM|l)@?Q+FNqy4( zCCP-Ne5+i9jh~E;uI#_B>_KW)!i%-KAvH)RNRqOSbOF5UTP3HU zVM3l{`~uYUF0x~*lH29ytvjbgKuoi1dzjxZ^x+TVR35iSX~_*7_qDBQ=`le zkv2%6hML)S>rwOi@8t^)F3{@J&lq?fLJf$Du8NN^5^++!OqFE$B z)Rq#ra|v~eB%1|;ax9_Fit?~wE+(Q5XxMw%wP&i zOL~+;6ib^Vt0>2npyeelD%@e5{Ep-M$o28K>s9R3t$NDdtIKaSz*E=dp;CTc7j!7V zw|+S!I^Vm_dhRYumeW05deBtdTsYa3@?`lUV_eSCmd1CCpxT!tbc5AA=Sy6XHM&GX z-M-s>pn2&jtD$EN?Ltwp@^xB=olw6yH!nszN%$h;^dMEugCii6Vins1lh`B6ae~@x z|8O_I>6EDjK4Aef{YAa8$TMDE+}dBfFNggNch{j50Iub2CF7$5^j zvaT~GIF0PG`YXJ5<*>Cj#>Qu>8$SyzWiwk!d{?F^Q!z(Apz|>DF4H)YV$a#pf_xvoXschE z7_T71=2dG=vDcIGoTEF)ZXIT*0az#8oYi_aZFeLKsOVX8XjK+5^INkLS0IA2+#C~m zG#x$WW2P$7YR#N_T7tsR;qLf1Ug0_SPlo0q)!8Wp zP6c2kc5ppAZDv_?_L;fGJ}!|sWnKdFRG(pU#n0N|iJ@A|cCNG;Pkc$qYALHD;)B56 zj>>;95*0?AE`JpQ-tp8=(HZkpf!XdB;?Z1lej(jdeJ|Ijd+8dDb_XlyH0-sUYgeG{ zcV_HyJ*uQ$?X3GS${Md7!6i?onsKZ~YuoYW9;TIW!AMYM@gH~~*zFR~VRkHL-kk!r zjpRSlKm8gx&=k{ZtLxSJ`T^8E7Q1=?Z-RC8*Z%SZ;|d|9apW@wUX)-(l+DGC@2Gq# zubHLxFDxtQc)gQ%r-m_-z#g~v!OiEk0ay5Ivo&PX0li`MvrZAa`K}_VyDby@b@jRx zjyE{i+iGimJK=DHNwQ5pq|r@XfC;8>pvA5jq5u^*ti3jT_4Je^S*WAf6GoT3?) 
z3}%j$T5Kh*KM$!F9mYEM4EA^^d8hbS4p5mNqy~AK4c^rkEs$;Eq8xpN%L5I^|+^8YJz|0P3 zT{>)5$WB_G)r)-6Ka-VW!h*67HubcGuBahIUH?~=JDR|If$Z0{Z0WN&bf9ikWl{KsBA;CN%hxJ={B#=J9vY*_9wvFj+)im0LNc(`uERV5Ku z)vi5wyKmP)kr7=q4VpLmkifP9!yY9*^y9_qg)J(Gz?f+N)jnP@dt&sQ>JIC(8Bqcd z1iQ}t_Jijx-g7l-bv|le7bDsU+yr1Bdo_yRxz0=Gp0_hcQZSI}hb{Fl#kI^d%jXVS zk&^7LX=o23(dF{mYS`24Yl6P%`Wb5DWb!i3;#4Ea57U$;&xQbzGP$tQYk-st3oRk8 zR9D7f>#w;AM&{{RogEz;5Z@^S&qoqy!Yi=h?!9TF?##S2IMFUf$Gm`X`Q|a4tGOUy z#t6$MNo$?ri`?CeJtki}$lk4ezD#XL9{36Q8GVvd*Z=n_wZ7zPA={K49~~(j6)L@{ z90VSx7HyMqELjJ~?3iV1#zn@yXq^skzg$77yuYu%|H%FI_+JO>`{jC}v(Fa}vz+{Y z{0~715;&R;%%DK@haBy!qKzkg|DXP^?tz}ClSd^x{zK3QMrUc(r;TvHtjuc(d9=VU zKRC(YP@kPVz=EWE07PcPnY7)FikIMv-tXwtj2sf4shGM9xsHl_Rf#{;HbnwKC`!K~ zro*!`uSSYdBF6Qyds0a-_HSdW)%$_0jq=P8P9bh+^t|^|w=!0xQ5aX>xuF=#;!orxskyd5i;T@q%6VhfY7!m`A60{_e5ErAx%kE~E;qg{N z@4n11W4EgV)0ak{Wjgjj52TEnfo_`^tEQtOrKQGiKOy z5OP?rear!)zZaLBM^8Yfso=;=OQvaR*cQLP?EEuS@(8?&uHMOX;)RO_5NQ>a`4fuvmE0w(+v^G!xtDvfO-VHlek zQ8>#Gb6dP1qv{)nGYNXqTa)*KisP)*W3b!eb%~3H{U)nE<0O9zF8;g;5rtaDX-0gT zxQ7iPXaZWCpl}ir>Pjgvq%1O%B{66s03Pu-_iLYMn&Eb77YC88MW(@nR}f~o<|)t0 zwh8wJt20Y1Ptn9y;OfsdWJI}JymRuHdU~jGFF;;bce~6!!6;=-G#cGBKEm8>nv+yVQpT<2V=(Ac4?v%GdV}1OUH^lC1{>M+2gh zUq`Dpa=wgJ&AU);v>Wy9U#rofypxTj9A>aI?Y)YA;AR>?}o_fCNPy(r^0!QTJu14 z3CZ!VoNdY@yyO}F+*Gh88yAeP_KRM_lX<5|0{j2;e{m0d*zT6dQE9HUcRu4XoU=Z| zIky9)#}X$GkOI_>`(LIlje}`_YG;+q{N+yT;Q; zJ$aX*b^~fOZWXD{8juyu-T7S^@JJO%j=`flG-QvIp;K@hI3)L?aHH?R{J5?F6RXKMS(&2O?&%+{9$1&5Cipv)FVGf%@3t0ymC zf0{V_(>BeZA}oe>M5-kmSAi|};nO9(2)r6P+xT$$2l(sBV-2K;wVKW6h?Em`?Qn|8 zUJCK_(P%$;J#W$8?z_4tIS6J(Ot~qrDUD{ODV>$Dt-2*k*U!T%9Y_-MIOEAUu#r` zb4SKO*f<_O7E$`R$Mt3M8M4H=LvxxcP%PgBCzjj4>R%Nahp%zT>j2m#I~iAS)6ZL~ zSEq~ZUCvnYH8sR6*|6<{z_Kk2u*HXJcSYVz`knz{upNOT7C4d=**OHvVP-)JMYcgx zL;aMx3D&I{Q;^PG%@H-S+%VeqrZ8vw`Rm*_#|~<#<4pmb}95P!3DBs$q=sbAgl? zr`d0u`3iUv`TX4tCUF5!q%zI3+|2wjgXYcD4LnYTp^9-8v-^^&BuILuOcs;J5}NJI z%o?f{v$^YF$pL80Uxl~_EZL&g25~gmsw5-yn3k zd@@(QI8xkPEnj9APgmT0f>+#=L=JX|XX!kCl1h7OpXsKEz`7TFh__N_o*1Z5IicvL zB=;rzVy^OoKB?LY0p z^}FQt_JU^$_N*h;s#L3V%Z!-cS7{B@JXid?C%H3$rcjp%rdz{$r;FD~Scy%A(s^QQ z>BZLxg!%l!j_iG5VR3IQk4-1(k?mL-MFY%<@xqQH%l%GsYrAf*qVV#|89AQS`e#Y@ z8eRGcLwqf+^Ba+|HU7SJa^q$*oMd{`-;=ku87>bK5lvKz!O*H|>Y_Gt>w80XIF7xE zk0T$Jv$@5FvDwNAXS*exO!iVkCZpB*ZS%I45=2|g!Ezw4ktGEmp*JRSU!%qdnc)k) zO>$FJet36fWR6l~BRM#*@TlFY%-sh|V9#gv)#4Jif%oufr%hZ)qxSBjjl*mG-gFc? 
zq*2L%hJ>T#HXsAV^i>*pH_r{}EoT-@JXRN}P>CiJliI?;KV0OHcb#TlD%SphV|l+F zh>>Ou=&n*t3#d#Loww0#t+#L#_32|V99F>RLppd?6xT3((+=I_C0I1n$60>CfYAIv zYi@wA{D3-6skN1=)xOgWqQahH+dyh}+-`LrG%E1EBUP}a$cml%mX2)>ha&YLuEX%p zTR1Y~SL*Pn#&xXL5akHr`f(_RO3rqW!it2p=m9hL| zE9{oV&NxO;4>hA@5Yys}JEgoFiFe>kEtQS5Ff$?-&S=8>7eueTVwc#eEV5Fc-a<;h zFX_uyHXXJSGkI+yzBU_Y*;zd0u>C|xiM3ZTT(8hiYjiTiURRb*-i-w`ZR)JjTxZz( zSmT(>)>}wi=4ht$jI@sz#8EgTA2=U9T8Y>vNkG>AS(Fw?<;+Ibw_rgpY%Y+Yd+@Lg zK3*IamFumE@P|H*qloSF4vznfj;v&0azsRPl=dZOpEd=2XmODg2ykkuTA~}LPr;P* zhuo|W+!a&U*j`aHGwB+Ke*mXyAKaz?vngnWgAXRDzelGo2D`25tIJKj@p_Drq9RuaoJ+rUX*BR;_X;$B%e0L&VF37Q$zo-V>_T8x+?|@Xd$+S#6R`u2Q>sMH~ zQzA^ms%&iX^OoTX5Uay+)63h3_*rE}YZ zuCj~qr93c1pKWnNrzNN9^?*v!|wxZzsxN3HRacFZ_arS0+1Z{XTpEq5sV!Z5qJCoc?WC~&!Phkdw zt(lUaXg&V(y@lcS1Xg0aZwZCj)y9f0QmJE0gj33TTl#!8x?hSGjh_tR7TIC8Z&o|V zGh1~Wu|I6o<#bS-x+u>E$r_rAAF#FBNr?!*Uv>KCNc6h;anaDYmF)^UxxNCwuU%jB zM~_R#BH^Kl7~FoQ(LFl$(Hvaghpv)2G#k5b!PAeoKP06lKskrW1K64qts&eGV42$^ z-8Sv&>p{G>3G8G&g`Y?6EK{cnvE|vCGB9?cBB2?G;()K)2nsyR!>{w{04Peb(&bS({ z{T$eygfwi$p!0P*V(;25N{~{-eJa(NtowIo7m{(-<;>Aa4jw$oNngp1d=X|M1_NG= zr6@5#meN|iVD;zcW+}?y*RsRO9|sTNZb3X8hpsn|2#+&8-fXO^PM$>`Up~T3ZDyc8 zPqD#cS+z5{Bv(l}T0TxH@q~M%817 z(T*SA!N!lVXXy`Dkd#*eE5MvPdOF)+E(tI3r@8bQK$GDMs(moHTV0izKntS&zlu?m;N3 zO(o?@N>Im(QLoRrSuvg$NE?HMA}zeQy$*9yKnq_Y61KjNgbpPu;Eo91uIw5`gv84~ zoWb2ABCdrcRn{VMpR^`z4IhpEFZSL7Dz0p46pj#q;K4O$aCb{^ZKR=b2@Z`zqX{Iz z9lCK34vhtO5`w$CB)CIDkR&9Tzmd+&y?185?|Wk#5V4e^!Q9zQQk^YxVQbA7*YCt z;WsA(^_Fc7e(lmR_o%1BV*N}BMmZT!@k;nB*

jjb%fPJLeka_GF56u4f83H*$ac1#f$;Fqt9Q;rUSzZ;5G z$IlFHL5`0Tw*ohe79hb!lEL7LL_ka-@NpoQ?iE|iP_v#KF;}h8g8H-~LagN;W4n20 zEc#3O=bt!p&Bs;n`|r6It_Xw(rB&WtI7LSafhDOndBs*5SlXG(6|!|Dx|ALwL; z=Ut&mW+m~hDKsMc_yGyIY8Rji*^%I5j10WOlc?g<97QWMSmx8sCTf);WUyUR2Zp6s z-+Kc<$Py&YWsT%yt?^J2)QKGCUnqa+7IFEYWO1prE9{doY#GmN6yhq~k1^{t7%10W z#DUNx*zX^c)X?z#=zVS5FsQ@tC9|hym0}Uh4Y(c5iyv%az5}R|V2v@?wDR+-vyG|x zqR#3yW``YSHUWwT^qPg5CA(U^Ooc-Usf?$wgJo*%kK3du2)l`s@3<^&mMqgtb zcP}+4kJ{i(gl=caqUJ!cm8<3uY%CI;2kzw9j@&l$1SicEG&+};CRq7hJ_03b_c!a= z9)XQFJ;_}KpAW8CpE@&+S#N!%^z!m_X5aRd-|^J(ZVEfngWLkKGJ_Nq$a}`&gH~Dg zgmapkTX=>p-D>4~Aripx8qKJuy$R|&+57r2K2sQbsR(Z(h4UDz!k^ddCRe?;6i*J- zrw$p%EVt?jk1_G#f#PU%%4_)4w7|niV`+z4wpM%VoX8-nfLv|;@=>Y-#W;4YUomM* z_A{h=Z$ajNq!%@RJa)SLXFpfv`_MTg4mgS_7RYF&aJe#`EC@N$PoS*kI1-6$NPV;6 zTy<8Dw^eoYJ*rt$1i@M)2XSJ$Wl{PYYr`GBC7>*I-J=jx2_T>uJ;m)CgtI`R-@u-B zsP`OC-r4|PjzIrR&cXoJ*D$%%1NDr?JDA=%Xt(S z==%(2`&ya<1*2M6`WvWn$|FF}gPQFD==>|e%_NI5=jd+Tib_y$m1)Wd&wZ&zzMyie zLRmgV!b}U7$vsjxU*H6DC!;Q%FvV*CWa2gyER9n-rH>5MjabuOZWQWfJsfe?_Ze>+ zJ`~zU80*t|$M#p%^Tx&w#g2{+SB=IGVYc@p$UT7fkVSlr6k;X-IS}Il?to+L3vXY+O;^+sxcCJ;{t7gS1KMv#gD*f0Xo)N`6!H_p zmFzA?-=f$iDU2=Ul_P5r&*H*8VB3g}|I7f~j^Yw?sh&VI}h(i%sVZJyh&qceQt@`2&Xu8vQH3axM+O$3f*UwSwY7PDM}Qv$S&GjqZr3J#Z&p{6#jkO~jBg|3ueH9XkE z?QfSdq2@%?L#t#>=|ezd2s|1boZw)$5vt%|WM|+Wa1vP#D#6VLR>``bBb~P~jIit? z2dQNVN087(oOcMw(}0MM)S32hyIp0%qt&U8VvO&O3br7qV-LVVd6!(;r5>5Qgyrag z(KsJxld=kLrMDX&VTp5TgD?$NG5hSS26bY50|8xrs@5z*C zvGwv3lIC_FUMp;=MmQddy|hQsNb}~_++Q&d{XS8rp;=5xM?0(`^^Hw2YBte}Le@ap z=yScU_k1X1*2_nBF+E*B>{~hz;h$*aK>M;*m%~=KCezCBEPj)PmsD@a0&^lu35o|% zNU6X0t~T+xqBT>*)fuk`tA`Q<^e|LXx#X^4S!P9%Mi~yoNno58jaeo z_Se%ZS9GNH?IT#N>dL7i!@UQ$?%~G;x5~K1#3jq2#|treqfH2&XSqm(I57GHdks*x>q1_m&xNziTuVPP%>apo7(9 zFJGDsbd1&Px7yaH9O8?N4aO7j5}ZPwknVM>axTe zcuOR9pcO!8Wp-4C#2n*UXRgCl@6pxyyu*K)2_0-ADl*atR3<LC8B5ySN9ynLG4P4OO#Ufo#wj%zwH&p*RB#KKW@69yN5cqMJEOp#D&)78FLH=a(&d zt}TELZ^4yEa{`@j3SLirG5Aext0W;>!h?DE*4i03G={spPN%B~L(VV{Ji zgJA_SKwM_FO&m+FYzs&0iui962v4EEd_%Up2_R%UB%zy$5)T)H$jBLVZF5-(!0-eC zlInHmZ_QaU3V1;hLlVGXG#3$IWK({yGK8fnrLTV1-LjffwSpRoS)%<8r%R@xp0^}{%wu{Cs!bT@Ad_tOfbG%DpV%Eie&iMOP$}%?*e>t^Ex&1 zBCqgnHmKWQ6>sm|1gq;^VZA(N4AoV4M~;cj^SQ#hF_)_#D(bvHOp&7$gKM(@#QxYV z$!!?heHm}@{L@mw7Fl_!V2zA$Xg^G+V7`A9?vPc%`Z5*G`TMa^p{RyOuVN}tn2d9J zDYB!fL$mpSde}=u^zoqJZ4E@mzVj94U#Zt;lcnvGd7Y5MgN0Xyu|4UY!M)XUdInpY zE0=e5&47zn(Jf3Mix;$jsi5u@+pz@1)qL=2poGZqKI!~ves>l=SFcx#Mni_d-)o_D zJNO)ynNpMb;rHrBt23=d`I_Gy-bN+j%Y3fZ+t&v@{RcI>=xPJ$7*6d?NY}NeB#K2- zn$P}qzC>(aTF6fH(bLeMm|Nw!B8`iEH758m-b`fyQyQV2^>Qai5)puTMEL5_#=?As zdTq+j2F=csf=%H&JDFB;1a6-hG_YLt4Gz4XAr5M5G^8Hj8Ss!R_Ho%(&7^c?^3+p^ zCTc`wtyMF834Y+0N2*Y4>LR|w)^uH6-)PYr{vo2GJ)%ea| zDK*DF=0=D$)~y5@$bwQGObu~X(io^3cCNd}o>{@Xq{@~gMdV@6i(1S{yQqjd5l*0= z+obcc+nNtl&JVaGsBZ$ag5~6V*6Y7#J|b@LA$(EP949~6gI*rb)rqi~z9Gfka1&-R z4=<0$+&NA%JuhiJ_v1H;Iq&boOuIzv!&vZf)9xcSWf66)E;smIe}O1#R0{f;M9PG~ z1Z}MK4V7^!dNDH4&mcMj7JPgz@XZ75-(1r0+BvL1kVMd+-8t4dC-Azcr0^=_()rWE zjMy4+R;f%!SeD2Ri?yDszX~^NZq*DO>fj5vPt_NdGdXT!~@PH;BSANR7IhY#9>d zp*;dgQ97;V8H%C?+1+2Y^VnP(p?{+Y@a=kU$LBB2yTow~=@%)L*u2ff9)kYGBO`8@ zaR&2FJ&!1};ralExtcvEAySxFmk6rIsV|Yfr+rW5GYLgpAw&x&q(rkpF@mlWwj%E& zmYf@52K(Z5Rd-yYJLX9sLH6df*q}|o&*&6X9E{39Zgsypm#81%|hpr6~lTBfRppJ zLzsOCQ7Xp9!js`KfJ@18>O5fIka8hJP@_9M`X!m;cpj@y!{2DsGy%M{I0_-yWNUJ; z&zSbLA3tJznB_j25BnCWX{aC7pkL_K@+&Z_5#$@%(a*~ERa|BY>Q9YReP-~bqbJWF zXxh2cxf98$t?;CZ#h@Fp#?H*b6$8O?a<{D-mEHk$G|Q|Fssh}K+9?^#bUInwSx_ny zPz4-4iB=c8$*M)&a8U7wZlXTax&0OVDO&{cbry5J*7G>z=B+d-jKv%EGC7IHVR z!Zj+yj)+W8iL5x3);{_#j0Z~tK0tM2$pe)~bih#(KIq=r)VU#{c^{)V6IB8~VMf>~ zo-Q{R+uc5(hgB}Pk&CI21Ci48>DU_LNH*dxKEesKpTO{)PS3TEy 
zN-rQH#*)@pwp1-e=#=@xwRwGq53itp;Cw(v9%3c$Tyd^*t|?CZN&p8x%OBRtKhF5Z z@W{sWV2o72G@@>ZJgH!id9$u^OOih>>pLa)re~dKq90Q# z#4%U!)tG3`4~ME*&1E7|)14ntRxCV*XGav0`%o3Ef(1z;WwN1ct~;c-u3O~DhG>_% z{&8nsvItokwQhK%8@F%-`FZiYElAe!s^h#CxZzatYt+#`XJ~fB!Z{|W%|A4Ni~&sE z>6Q(3omW)VQw6`{9e2i4>s*nn>-f$MLKKETltt`0~ippo-}*B%V!VK#$sQ^Yu)2+^SN{ zWJO786G&4!ItqhKNaJv;xREDYFE2(@fjjnz&fR6A!9{M1;xL&K4Uonv2~*>i1+Rx+ zC%BtOOTpb0?QnJw!QTE zkEKw?Hv>%zOiIhoU8y#TG!CF=VDCU2x`C85aI%HLV=PFW3Hw*a&1Q}dMp z{>B#KNs{hY2E9Lj`QmRO&MrQj8Tvb2sO@=s>*x`9k*ZlM_sTw(+1q$5FG_u{Wo2ng zN43f#Iw^)$$s0=Ke(x5-YDf0#cl~d4pDF<@dA<<)SV2e%-Sn%HEdfLdmVNcSp)rI} zQ&7j#S!~NWcl%dz!GC^79jvNGNT*2@n`fkkLKfnQ0v#9Ojs$Yfj4rBbGr~uIF2l%=L9O~)FZG0fs%|=vC-f))?H*7^o@r`Yjfn55v_CH z8qx#5tX-ZJuRdo@2#F7Km$yQS=r_e^{W}$LSw{0e)arXP>levsXPIr|zpwwpOqR~;8M$kgPoQC`VkIslzJseZ(KJ9!5DH}+C>eBGLEl!Ja@sPU6V z+;@t&q;xywMT5=G7Lt#39r~D&#Vu^2`e=y-Pcr{^Ht)ol0GALxvd(Vjuk!6 zeG=3Nl(ty)@nI!otuxfK0nylbi46{YI(u1@=9Ie6OTwo}iBI(^v5p+4$V2;GmDN|g zPn*6^FX?pp8bj23ey7w5qdw%ZPqHe(7Uu^Sc`C}tym|?_Psq}v%~Q4auA}l2^IleZ zn3?)8a#&D7e*U?Tm*NEAsIXrOeO|vy5b#K_M;dj>M2#?dZ;{6aMZ=&afTmWhT)y-4 zFZUpnym^xEdu2Hr2w3pNO2AaE>^*<3ny5f7<6iv9BF;U%@ow59y8L`JxQ2S9+!&Yf zl_|Md>z14Y%Lg7=`K1U60xx5mxX*XnpRCN~=f$qha+KH!#Pq#cHOl(q3-K@PQIASH z14Z@6a~GI;p(uq#PnC0^s>)Zp3f*rC)-1rbTmcM4qrfVU#6BfqjeNO%btNahl`x+B zQwlh{QI6eddR&mhiC+nIHBMEso`#A9i0t4ETYSB<)sSk|BSi@4RaD^=f9eD-mYhew zue+j~8|CWEDl$0@yv-DOcOgYS&mk7{RcBr)H@SP_VIf<+VV0$R z-V7J0ILtlY&@;?D`HlFdrnfIMu?aq-{7Ix~_KrFy7Tv6DN+@5uuz&Yr1)z5%XN5##*dqongl7NE&h#;A2J+W{A9~PB#uxtWGd+|!5T+K%SwnSd>_$YRS3AO%$ifOP0^uKlA z-IE)uJe#)g)U(V8m3m{?A1vPwc=Cs7vPO>WC*)hi@Ih^d01=$C^8vG0y_11W+{YY@ z?hmZs0oB}cGd#TA4G;CoSgEzBwrR{EBC;Z9Wd2_G)BIunIh!mc8`lacpP@s#y;yne z$+#kZw-vKGN%e_hZb^2z1Z$RndWH^Ag7iD)-sT^W=AN%X4XP@>0J3#mNx@?)D1IYd zh!lNR^B&fMCfr;t)yZn%xWe}tU78aShtOJ>Gi+0Q9s`y`>+q@|7_uBD``SQ#Vq_{P zuFFA(sd2DuKhss+i%%Y9#MgZHPTrghV#A_#UWD7mN3?{P`{#MO#XnX47C%s=;976> zgYn99i>roQ4pdawq<_c>!nZU=nKT49B?{`}a{6Jgp;vZ@6z6ep^WyQ(9-5yx5leuc zJdNPKy?^=w;A3_`yBbMs6Lj%lx;cB9_L}#R?L>aqCO4~oSUfl{f$~2LUSK*I4tw!LF(jTZ3E_~w!}y=8Q9nT*m461Pa6MUeQUU=yQZQ+ieB znB?D+P7f=j%$DWC*^>;b&T`G^U-_Wt=A#+TLXe+DU~A(j_l;g(JeZ>Dd+|9VBGD3x ztYEGMrwgWN)zcZ5$wbu`!~&j^NOdI(-ko0IG-$F@gXKu(1x1F@;Kc;Gjh4Tjwu=e% znT|41S{FUiNhQAjWAeuyifSxTPFv2mPM@tDv(D|`r0~yKco%N! 
z@ujQwX^yHj+Mvw7&Rq8nxvY#O*{n#~?mu7t<4P9HC26@35_ujU@;A0`g9YSA|3YbN zYz8iGWysLwHaf7ftn?waiR(cHacGhayz&yXLt|pGHYBoBOH)F8O;uYfCBa`t%f7Jx ztC_Cw%1k%duxcCYrJ&g2i9^gvFXJDSSZ!1DzWOzSh=CV65&#Ad-mC0Q=0MJ^Hq)(40oFUuM#Z6cUb5A zuQ+jYM=T(@1oUr6*Qaj&>J#CP5JmPO1e6r$yJU7D6v_B;nMjz}gI-VVSfWP~r4)m; zfUxc(xt}X$Nv8WwEvg}f?X|sEk=rAb3DLoh1BaJ6w*AtR&ZD@W1HPaemne_>6&`td z3bM%GOG=SEw|1{?f{Ttxx~`NblT`9;A@yC8{e#%^6yFZ9sz*cc09N$f-U+^=xhCB^ zhZxf}hPzK65KXT;x^ece$X_HG--04z%eScZAI)4aMMK?~XP zMJnTFy84tln*t3(WkM{2`$TNl1X2P8C3^HFibqR;;50LiQ6g(}oIwIpdsj1hh&;Q$A}D83IodgDNoEZ2<9c*?EsX6eJS>ucOzXAqj=l<<3gb zIOu`<>O6X=tC@Lf9j~ZK;xmbII|~ZZj40Jvqi)4>;+x}i`1#NK!c&r2eGSYWXLo^p#xXt3M44lds6V62F0X7pM7 z?b?IP?PYP=O{k47LCjRb&`pb7!7YiLzgokJUY-T{vv>DA<7WZFd;OXm;h(5yy&Q`g zu<2jM7tUdiKbtP(cY#P&VmbtBAcDA|rWuYDs;+$k9Th!lce!Na_mA*-1sE1!lc`NF+O~hAZ_UU zdREf?Gb7z)q?u@sSH`s;9BN7&xZJX>@qh5wOIFdTi{mYSdP&%y;akIkz!>gONJ;^a zw{UR1kdMhkch4M8L|T{)%Rx>IV|TjKn#<4faj^}`SbXxbjhP9- z^1T|+J}L7ot`TCZkRV11E~;LsNMwT_B2g17PJHlrooSCeJS{u>vcffjxsb*1qu%$B zAx{G{LZX5-C7;z*DB1AFjKz$C(BrC03Rni&nz?R~`?}O+s=4U9k+C%XsPc*dIl!K{ zg%8KRw%K}X2*zl_vQF7mP8A3V!mDI-sobqrFXg$YrBt~gYm!q|TgWm3@1fNivL-P& z3V(i?_DaR|VhMxks!5Gn6>CYQi4BY-f2(BH~Qw~$5!kw&g0%6U^>x)<<1QAw)jb`g4Z4X>?4lFr_0hy}zkRBS; zKL{2uWt+v$qaquql;k}QHDA^>Be{V4QmoWVW2~l-Zk`LkOmaaMnUeUplp2Jt5OGQ2 z>E#{MF-)ps{HAwb%K&S=Mxskh^lFqEX0K4NV(1yw>9^suiKE=nQun1|YP~RQFl#ox>PjG=?*dCFsXvsp!phxg2P`)4Ibe37*04=FG4n=n|H2xgn`uXI5H3 zG+l6c+*`GPkirka{?-nFU?MN@LJYoP(Gs2$4U3U7dSw1sefFS87zsHs?wbq%tHBQ-m^|g8}fw__gt+`rUne?#?F=^)5(mA;fw_cTKz8=pn zL-?cp<@ zkcbiWq%fDLCt1eQH~O*(Q-`h)Stepcr>%5EVBO?W2oQ&Xvi)&Ov}JSD{DT|vZ~$#Y z*1|7jb!>@8tUAA;M^n?jlX{XvZbIv+n8GPjBUq*TxZF%1FAz=g7L{+b0Zd1L5gl2h zp{+b;ep!Do00XgIVztV#^{^7`!iJIJ-3f+&=4*_CK zB;Z?_-04!2+27aRgiO>~u-)vOHd_>Lu#3Sv{vHaM`G&Cf$}+RF$u6*IG#gPbT+QGN z3wCYikV=ZFTKWUWpr@G2eK|eV^hT z`f^vL(>{as1JhwFT)9vlefAPi-8DpP72&wTe0~@3;(#$JMf@C+RdXsJ*_MH7bfv*{ zAJ;!e$8Zre_TbZG(PT4h&B)l@zJiP0!@6)R?5|6LVT0Nyo^$_Kw z9UCj1)1aL%biY`N{KZ@9)(ql|hIQz6f!X4KsLmZH=T88#LaJYT=4edn8lYZCVWe)8 z8sco1Wl|1)6A?Xy5v$vmQt?fN8*a|flY0c2x3_p7y1e0VM^UmMvPAy?ifB6g92LRV z^kYoB5hvv_OzQE40;1scQ(DOO2S4@C?6`ykn*9W(KX^W=EL_G7`{xbrrXaY^yV%TJ z3*0EB-ZzbD?VRTJHP9xq9%c+P)qEu9+NLl8#i2kyY?krj=#d7 zS(9%pDgrNW@bnMvekp&tmx~P7`j74ptN@87HB)wVO1X+DaXsQH^(3uQ%8JSvj!yaccR<|W}6Oy?g^7>q*QRs^chba7u^YEIbMzIj~u8k%9dusYKf-vqPb^3h5& zv%$`h2y>}XY95S+`Tzd+k#=_2m5ae)t;Wc*R(XPH1_H*Ekq6J9bf4D4ZaRVZu*v+WSNRL571x4a$ znmo>q6hj?o6o1Gcig;;+(la}$^x|s^PPOJXYJvb~NAgHiCwY$8@BpkoRF`EQcaJJ7 zT3&FwWQ-HtvkWs0m5*ek7syHENsUwRj|~C-p$RA>0sr*7j0mFs;V-TKk!breOcF_A z8TJImg{Sv>)uaMQ|I_*Wv`FDu*~LoD``+_;GipPDM8HaCX^F$!WUbQr0-KJ%Y1(A{ zRc?v<`@A$dLM6nP2`tRV1zio48bVv9u^1bemb9)fbdPdHl}@Ql+P@8>NMP1>TLu5z za={{+#b;>Ga41DO;0Q~E2Q~tTb0beRn!qvyKSSBCT?)Uhn{@sb;1Mpi<{I6dkX{j0!n)%-pap!jm0CP?ITG|e zybt}_jMDXVs&BEzo5b%hVP5NYq}RySwy;!uS%K$oF5WtWi8m||X{+$@z3Kn2TF@6T7yP5Z$5>JnyhF_Y z;y})yn3R_yT&;8o(t7{yG|`_pAN;Ev|Mg*+-;Du(V%zwi{ibcFr*w1(m`!&&AQN4D zP5*2*OH{a=dYR&a?Pc$?Us*LGZkk`Vo`3Q`Ugx{YozXf+;cq*?E{nHv0ZX%YsCmHN ziB~Bu?mYmrUr|RS($K(&`@J%8lMI=R;~t+Vu6TX_d8UkWXv@Kr_?;K|&5^;Y4n!Al zJnq|st=D+OKmtDN})LPD^dMkfrUB*E}=+$7yNSY7u{HNwcq*s-sVE`$4p6$ z=p8>prU~J}N&hhM7mP;I1Kk|4{dGqIyNSk;9gvsZ>A!@0XGKwpq=jei9CNw# z{Y!D1&DFqObxZ@vRZSa8!{9+{qopcLs34(3ij;P7p_~MoSa^j#JX^h_SEERltYj3< zv#Qa!l?C9bC(8we^g`g~U*!N}-p+Kux>b$j5L$!wujp~$mAD3ww3zfLys3vyV|>*) zO;aW#K#->>t{d!t-{2}wAq^~?ShJ4<<|F8r3(CbQyrB+w|L}6l2&pLle033-1UWI= zdG(cg+v^OP@`pQ~GVODTy9HYH9EIJ1$wmh{(fYaTe}?UYp7L!y3t(7A-BtVT+Nri& zHm#v`(2jX0e>NnEcKw-vPs8L)4m8TVKeHB1%8%k?i8?ygIYr3*!Q1#QagG_#aOU+{|-F>HE1~w8k7UjgT zRF=wtxjj~!UkYL!Y;9;o%#GWFAgpE{5?T%W(;9>$sIc7VcBr~`ON8ggq`OG*|$ 
zD;MkuY{{Zkg*%tWX4L;QrcYTj&j#67Vw#&my*)r#PVV^GM&B)scx*A@xJ^T;Cft$L zDe^L)nS}5rMCP226P+o?_M~<)U195{XC>^g&#0nmD>e-`@fkx}$z6kos!lx3?>E1Jco}bL`#YO&B0$(Hxew#UCMkw^z5vR+pDIgMv}0=d-9EO}^TQ zTxaxTgK;~z>r6`+;IL_EN!@G7pUF0r-2r=FfX1Xp9m?Kx)4&d zCEj4#jJ6vMcg6JUZtnv={0Tje05ksM(scCbGTIUHp+`nLdUR#%w2bWTsEr`FvWi*HTg26-x}S3sBZ3 z1OR-GA${NQ;@W3f+wIzCLG9tbP7zHOUGXw|ZX>4==~1Mj6ntJZ`y2M}-6z@HNX~!jueGly(xzPN= zCboNIM>SujFwGo;IQdgZ-Iz1xlmVU6rs2>o-fr9 zQI4PO`-sgY+SLc1fB4H7Vj-APnUaM_DOi{h93==-ckq{9yCumHfWL#<=2R@ z#;>Q;{Lnw=f>Fyc=W?63j$1fxOS?tF!d&h}k1JUX`XkTZuIXLGGhs*006_33EL0i) zu`Kx#TMoynXC^rQ2hURC)Y}t?U@N%#CW?XnIy}Oq!TD^H{dJw@ooLHT5Blrc+8@#d z_n>P&AI=cxhU)m=mzzj0R}jZS?Q$-iRq8WQ43~EbVZ*j#05wJcmTOHp(MrubF_)Ks zNeM*#b?xYSi_Di{hR2)#VWJ{u0dk^ViFp-&pz!JCit;M38S?I3y)F!|tV)+5b}zol zyKEA4u*4oEvpgc;m^W^xVosxCL0M5LAM5wT>i1nZjt3&tBxI`1z) zYni@HT9Ss{vw5sJ9}!TqbDy2jbf-sLTuxT-w`=K%Ria`6NU-s(=$(+OH|$T)Ter`VG{(x9{A!dGprw>({PdyKxf*2lv)}JSt8ub!a9w0X4U{n(-3; z168M&ZH=PfTFWz8^KrvToDyn#NRo`fc{wT?DP`kFKwOopt=I|6W*` z!X{$*I8N@!Pfdd;uOqXD$w%3OmHMl(}dnDRA=!(X2))gmPZ%lhHHmEVqR0gq$2`IkZ#FId~C$| z_T8ayl>^`%zdotzyFPb^V~+ffrC**+q-Scgere!;Qx}&YXWF&W@Z7Dxrz{;_*l!z< z^;R7WuF$y&D77iBZB-v9NDz%NvLbFBeObgaW6v*^BFv&3?*$L!dbogT2j>T1K(hFZ z_%P@2cyFgw-^VdMYmCjyi{u+`P+V97DXefO(+M_`ur5XzNDaT$-5~=6hXyG#wC8hF zrBC=JuY;7QQj{IEH~LdlC{*<g zwb+B8Y9{-$)=guVQk|4$@ktI2oRmkQB=;|MtHg$g|7Zx!Jt-|9=g0uaDoNFMrFfoQ3fpLFcp z*2Uv}ffo95sby#aC?A3q%AH$tvQ;S-hpv%z*U*J*=FCopsN%z1R^OXsi1vIAzubeU z7g~q?=CYxOsRq60PFp)44*VrBFC&3 zrc-x5Nus<+oy_P#b&7sRnImu`vy$0|UE25k^Q+m`$E(jCGJ#?5=I-CmtTFYTt+rK` z2V3xUd5z&v_L~IM4O8k}mlsTHod#iC*{wtQh1{Ya()C$MS94w4%#(Us>jbMy*rV5p zV`J?@U#oL2)W_J}d_<2E+oo_@tI+Ke5M4gWU)uqcno)Wk8EX)K`h|t)ROnvm8EwRT zoyr%jXJS6p_*D5XI_RI;sf|oXaH`3%J*_O*((lb5R5rQKKxbn}a)#=fH8j8#C{N#| z0nO^~8#xweN|Zl(rf${8q{r+kX{($Aw^Q7B*_OMQa%bUB_79Y1i5$Tz&aC53A(H=P1 zmr2U)_y7e%08iNj&5DQY^2Qkh0&+V$LL8)iOu)E0UnrYd&r17eJO@Y8ci5#hRkY6} zYX3kJ5}4lUoFXUwysnfP;!pOq%5pdW{RRsl1KzFY_2ucy(^Gn&)D$7v>S@7dR&Z7A z?wn-@2~jXj=XOdzXS@`=H9Ize3xNem};cLx@kbwcQf6Dw^|4 zXQfa_%}=iWt@3jJ`fLR=9L_+#@D5p76y>JQjUNk^Kp#pmovqbQ)2$)J_3~=|5^PB0 zp2qlKc#=6^(;**Gq66vAoSU4PE__eZ+F8>l2)3S6CywYAzAM9KfcdE!I3~m| zWF1lF{v8oeLR}1gFI;eJPRm!>(uU!q9mpUKWyb+PD4iOI9U7d5v!^yWZ=nJj@i-V( zw;{%3^}R>-+cgZd$P9Xaj;+15chBEt1Py)Dc?&fKgb|93`F0FdeJFR?6JXIlQ@-~< zZ$J%?CDlLkA&bLFqGq*^i#jl#FaPb@^j;t3kI4VkQnz8U%=GBBp^Sy?yNOdt*9M=v z(yH}M@=$tVU=Z++KeLV2{scBtic+ibN>urx{_*S2yT*@}RN6Lr5`cTRkN>?sj=l@t zB-1ly9vk)^Z5F+=@ZTvgvCsf$Zic-*jr;ey)_=3F=Kq+u{!Fi6snt;^>xh{xTlxrv zW{T1mXD6!l?s3-#f3KB~5~5CDs3_{bV~6Sc+?7TkP)||xxXZjpaWVX;+|Mej|6R;z z9BLC~mbyN^d-Bk-c{$R>;h0tzF_INAlHeakB1qdHNLM3BUn2Yfjy#vRz4(~5A5RH# zcz0zqeYA_9vHY&#W*WzYeOx|lPLTCSM{|ZonEq3sC)4a1RXEOp_AXTg>7?*o+rVQw z5JZU`BGI~><-^C<BXKQc!#F*i?8T{6LetnN(P;0Fa&|O4RyEmu#uWXWj8N_aht2fPCM?*;CxY{2=NA zV~^cEHPKSyD4O)iltI30kvOIi(Nt}UiQG@$ART0qUyIeD3Hg@PpR|~iJktIB53|^+hQZhOr$@eJ-g~mi`=oe3u6dX7-H1u^@MCR3+7U235C6vGy5lg2RShtgCtJvG$`$kB)?1X`(EKYL39R+2WI2nHeMWPRE6geSJrKb zz1r@Ld7ZaG1;~fa`o-^LjM&GH5!`!%u?1*t0b4f68hKXzFs|#<$U}Wo6;32>+kl?UNGq-)?gFlDNrVDhbK69e zbSg}yN6&S6xB?vNu#{%C{|Ii66HZzg7SzEdBgbl74FKUu3j4N}e4}9;T>(G0yqLPH z6QJTZGQV9z-zHq4u{ryCyY7^!H|838xbCTyMr>A6P3pdoO40YKJ=Ls9mBC?8%2&?U z^li};>zHtP`j>vv97~GD{&o$N_6F$mn!cjj6BE*z#s-2l2;8HY^m+0s;I#~mgNaX} z(omYJ_br|{(RZx}A~0)hk%gl0F9F(Ya83%aH)Yx$gIqMY+ff%wxp2etO)U9$!sg=CmQg_L*LNCdgr9wn(d zqm_wWWI^qZTuqg0`F6a5ReILDt1;#3q?H_B0{lB>d+vnZ%u*>>$L4%_(>_vhPit?O z0CB8+rSIY8U4K8FwWs>ymdnbn&+{o*adL9!DF5TZgAp9#*It7Ycfokv9A#^V^0}&G zXh}j|*@CA`v&C>l8H@0@9*jKBk3d*JQKLc`cA*Inftt;IN58b2_E9CH1}~=Wm2s22xS1} zp~VwP)kDSN{2!sw>Ob-~+#6pzKdOYz=JLxjSJ7`r6{NPRN1!c6Ge+8Qyi7lkZh!IY 
z*%(DC$mK8Zd7W_og#jba1K;DIA3(JML6Qo$hC%P^rC@xRMCuOzepN@+=RgsH@y z?$+3xX%>T~E*#)Den#NTugyFuQp9#c!@KE9+GxsyJJIZ2z8k zf6nA_&aT=P&<$lliz-)5V&);FVw+nKbiW}_OJRRec8e@X!lGc!r%+P%Ak6mKVNWJE zw>;=Do#t<*S)_@K8ud*1d}-vXUb~Uc$08zv#KrfkiU9Ep zM~8hS#{yn&OHOL%jQUq!<=|+_v-?EKyEa?9#(K5Nr}cv688K#rr3;f=K|TX=(@%{? z#r6laY%sNtJNTj1h>s z1@tB-ePXE%Nk72S`4M1D)NKPcowV$$=3Um-KG_thqY>^CZe6?gwFzpy7N!?(;Bfw$ zvZ<9fXA#*Xp&qd;iv2cKi!>%%m0Lmo?!T zT6`cge_l72G%VOvYO@##AtR@a%khocl3qjK9)m=yTAQ_!D_0?hw~E%**E$ zrzR4AP|s~tP${K{O%PHYUQc8UZ7dB24bO)&0o=EmArr3-R8_CfyG#2BQhWEEu0rMy zkJM~l+!b}^IMUR}z=n;pgXN2youdz~6{-Fp%`qT!C`f*9lS@oa72#>n`;N;M%U`9o zJSv7&Vu=GtU(K17Q=tT=ol|gEuG>#tU9;XS;lN8Xgc4!<_2e4dKHZ>;$0qXZ9^Iho zh=PRjPSiCV^J|ATnp(6#?_`odwv^}%vc2K8oZnmPw$;xi`pk0k_vgB9&bj%ig@l$4 zgb+PI+te?;LVnoTixpsKr|XD?vDrS-#ADU1S$m`&(sjt+_++51^s<60)U8nEkm_|&_>cQK)&b7()F+ZoiaAuzpMD!Py zO;&J^ozT0F9dHtomF3QRUX-mP&kq595I!XsS(O5l)5!8T9K=l5f$Q$JV#n=ACSWov zDH>=fqM;*2VNYG@K`19f0buX#M=3LK=@@R(W1#hE$1#62g%)}iYRfciG=MnlK2icCF$X?rYP_t+CI;zEWNz$qm@{8Jg^_-`w=QI z+sEDz7k2-4CgU{lUF&ac)+f8bb1K-IYPi2_O_J5vgd}>P?1P(2s9V!i?O<048C&Rp zTYW+w!jzIoxf5{7nD0;1MTpL4*7`Q*F~7XEBITD@3sOCt=ygZ646zEqk8oU(Vvf9d zRC>GuJ0JGyTh*FokpfaJUqf4erfKPaAxHHLGnV6pw;bT z;*F%x*>pVQu|PxvRHE9u(B_@BqY-5;s$=l=lXoar`KOwcGakVymr1R=j>C#$+pjcu zg_zpR2!*wGfTHK+xr!x8N1i6bYo__I0_{U5zvn}8I!{sW$Kx!cRxmdwC%4;TMma;D}(tX@M!HVcZ2dVxMK z(8i+i@NKLYE4>0%7opf-U`>{^JyXx5<8e;|;-gacURy=TIM5fJ*1tLafMW}WX`)J3 zY0D>|kwgzOqaL3-RzGgwrdTg8cF^p@=M4_1_~)}#b*1@oZ-f3sjP_B3(xqG$S<&YH zO(ITSSHXw=Y*$*Mu&Bc9{YLxaBg!qgaD7)PYV@Xli#YX57HH-Pju;^jJ1K`0U8!Z` zQkzyk=|bobS(UD5a$gKImAPm8HmYZ7vDGoDQDrt@o-87%C9*0{M)|~Rqi+~JUWr=A z?mcFCB)=3s&z|RA->`9c+v5+h6CQ$LimuSgvZ33bSRx`Dq4S=MELS!ghB%F?l<=@Z*R0h=`24 z*;MYGd=c&O@PK;+i)iCR7r~N@OH4P10$Pu6nm3@(n+h>GG47)aC`N_pVE^P2lG}7B z*H+0Re;o*hCYsKEB9tQhfI>>cO7)MJ)53!BJ!IekEE5D!mCRk3rG*oiO`vA<4RsK# zcMi~T$!p}aOu$d3$Ck9#N~muZo+@aaL6Hl^1BwaSmk{k}Ei7lU@I0qOnop;BQBAN7 z>YJc^CLR1ufz7eLB;{x-?ZLH}1(5wQ01KcmMCO^JqY;Hd#8=YjoR8n)BmP+4;G=-R z1T7@vMpt4P zvvzkT+VQGg7TN-y5_;oGIjpUZSTIIRV0NY=+DJ@AE!0sN&r0k)gS6FD>7*cIi=Ar& zJf-$jZ;ri$37>oICv$ge4aUHH9|8~~)0O@0ZCp|E9D`zXtC+0Jh4amJdo)#=YT&Gc znsDFw#e9*Sd5#V1(NNyZ7MX^ad`4ad(y{g%1K*wdoY?6%2V^Brr8dY5-i}ypuorXB zW4Wx~#K^+TA|YoGPs>{uU3Pc|*!@=?-3RAJy*u&=QD)5VYq@TLHoc$jaw*I-GJ{Ut zArG%YN*q^1Da`znve0?wF<;sMQTlFbfy>TAEz}CtSQ7+I?^9P#$(Jx~QcyT!$66R( zIxqf{6z#7U(Q?aP6J9S^6tH%+F=29eQ24AL$^t!}i zbkcNkNBN0lqHC{MC=QuB7$0HA$0e2!VLujrtVv=^<+Ox)v#^^V_gHI=`S&{glc>)+ zvo>URhlxep#-_$-4pDekahaj=SP8sg_WWX}f4$2_*%295nsOnmD_6}tPc+T*RnXv} zGL_t{)ULYNT6CkHmH10{(`$^IoGjx8=;s*rg%|5k!a3~ZwEZ&6epWC=EafPo0V?xx zIr+JjBEL*7HgL06)h>XQc)8STMl5EceBI7f99Wlter&6yg@2F=#hwD>=VR|e{vhpa zX4kG(FSMJeti?8S$W3^N1*w|wV~M7$uOMl#mSj|%%cNCpo!;?<=}+Hf;BHLeR4NEB<%A0)&_rLTx3yQ`7Ww8>Z5*PGNx z2;i~cze>nDnlify%xu#}5I3pbtuoWty~nKbE4!p1WtQYXWmQT{cSR4oykFSfAs(v4 zvegxo4yq(6ritOUA(JoqhKHYlZ*BH=u2ET4PX$Q7pzG$PGk1%nGE+Cx>78fQAcWDk zTjpQ(v_ORY#Flx1h)@^%xpx_=<7AMYE41XW^~!HzW0@62=HTD%iD%q80ND2JU7NXB z2y;S*jHzC>MF!OTs>N;x|Flcn(J|dVH1dNj;!yD5CdJl{h=*)e?UB#ikGP8nCmuJw zc|a{dG?X#RdaAx*Cf%NlNPXel0*$s1}VrsD3h8-k1GqSxD7iKJW(I9&xyS}rh zM0feJ^;))Squ>s&v^hI^_zk2{{@XKkDbla?me)+wuocvRVMcxmDy@H>t-H&leLZwQ zoLlbYe!srrtgZ4wZCHMH!#_8_zM_420w2a?#kXyO`?Nf*3W8Y20GEspA4V_n?9fa# zN(2&O=WN4M5{c9`_6{8p>nCfCk)NYApE*{%s1mfU%T>R^o2;F~?A276=Gr}bB$D}f zcnlS52w1|Kplats_%7o{?p%keVqF;$ZpW+?O0PgO5ch2bhm`)54v^eGP;s!_7y(JRKGOX zF(Ok{8J&SHa#AWRKLt`IN#G0a!R<#fP_k53@_5-ksE?#ENK+03fXb1I3i7d%Vy~Ch zBC}5J*I%RJBg0B#o;hLZ`x0TbHz*8y;El}qXN*2u&-*%XezE_|svN5dKJAun3S$N+ zE+}WTFBcW1`%;NEtX&Nt`o&5@xr@g1I;$D#PeJ6!;QNXi%ZvJ#dJ+x+aT>g&!UlMD 
z>ED^jh>b=k;s!Bf;i-(-V@sk8@ED-wi|kzuUe=$KV=#yQYYUpJje{yVdo(m?TTo_=5#iZOnM_9e1-fV=F%Ni#u@h zxpW@?Y?!SSH+Vw%Q&EvD|L=?D(;I)E%u>g?+~dtBVAVYhA{65J)uJyla=Yob>t2kW zACnuag?kY7Y|ifyz`o!FFVAB*U00a3)S82PIJyc#KHUGvf+G zP-dsplkt$qd(uh-Dg!bMkCJ7Yfx&4?(5JKB6!~mbSn@PaZ zKoWa|I0C`!85JY`YOB*`>3J%Q`4+XhzQ{Rwt`vz=2N{%@$w`$cAW$l2ZyWPKjuzb! zO1bAtQ>(xR4RuWG6OU_D8D&!&ztrQ&`%k@;U2rT&I!u`G;~IGQ_qQAxM;&gNZgZXx z-5(A*qPSF}V(Xfi`8nPAFBO)F2x1ub7va_Z@_%bdJ8p&WK(A3tRXMb?GPhq3$Jd+J zkkVsfB{K9LBcvsKEeH7~G-~An#xkyyztLHn*D%n7r7w>E@btx-8=hv34hlONBji!e zP=q;N$DF8^uxp_&zvrE&vWBG%#byp!Oqj=TD}@RakLVa+z4y~iQ; znQ6QY-6+kMD^i+`DWJbHf}Yy`a@|H{8m`hJp2LdNks(65clj~H?1Cz>g-TOo%AU-T zX`qUCQ4>y-S0UvhKc+?w(CuP3GeZFw-Q&RBlOv`P4q*K~-biEGbz3KGm<-@RpFUHo zJiuzI9#Gb(1}J?|FYp!X>SxnhZceCKNRAA}3VvDaT$NVb#n=^E>W3 zW88dKn>e((A#!8s;}fY9N_hh3qKX;c4HLFb)ZxHysK6_o7V1N)6b+4djf`Yt9^Cdr zRqwf}%5FuDK6Pi&iglZA$f4u5zjBGL8_dVXcE#gex+?BVl1BZLBhTHm^$YJ_v00nO z?R)TC9~MT-VfY4b`gca^S3Phjye#~HvC!6%WLWcdTg%#%XqeOaX=NHS5*;x5dG17m zUtX}uxV^&Q#J~1fefni~1fJ43MM3p#LY~bvUG|`6t*5mzEdIx0F4ghjEu*y#pdubH zaw1xwb)r5cZ4#Td6x({K!8^O~xq#U0_x)3=(k$NsU3(*l)rS|yBG(e7=A#TK)MDU} zPx|M6iaeUn7fE5Bq@;-n#9wTRf|ix+B3NqaA0*iW^(%r?yR^rpVqy_by)i3Ko11*j zirNFz5O`=%9=`8lZb?x7p#g&@YJ!SJMlO7%QH(2frrf}KcwpumJx{~zTcV@{ammSI zzRVk$Z5V8`*!|}?TnN3K3DGL%?^^awaENtte?(VfZn%-(1dKl-7jQ42;dJFw%b+3aYGi-d!kr0)Z=MFNuawzeBA~&Uk2r;|Xi5^xQTo#FNW`W+uxv4;s)hkG|Kb}A`i8reY3>itewk~39+Rm zT0f)|NS1nTU_VTGxI3dZ8Hhtsc0ck+a0OC_TDbq}%0GY}`6k;DqOvBXwvhCEBh(3> zle;H;)^wA39sB86by5ywDU?0hbeCWK&cOPbvCS)g3+w^B0jCPH>Iyi-=LE~~N?2lL z42}@X_*StR5Qx~{|?%gu4f6Vl3OAcI)nGMM=E`ZwWk`i0Lz{gV(PIIoyA z20a2Y1zu^$y7w9C`ynx!r11*WoUiwOvJs;_6!_R<;P(fK`i`kQ$I>JIqBXLAr}*j3 zlH(FpifT_E6!AGdgi3ICAsnt)@IM(ZSMD49(D7#+?n^7B<|}r#7-y}pBjD% zJrL+!hX(Hfu3u&4Q4Io;&#)tJ(Zkl90U|W^})A%#>E3cdHixU$fVM&sKguc1CuyA67 zxEV?<6I;vp39Wo4-;pgP4T_KKv2pd{=UC$%vN~>!oT$L&6+83j+QsEj1VM*Iq|s(6 z%SEg9*f_R4CB3yKse|6kjo;iHy;#H!pKM3(#Zn9Qp{YajLdgVARO_O)0-Up@MB4V^ z(fQHqJ;p^V(Iu`WRoNn8ge)|^n-g+%i?AXcwT{W}H|KqVR@Vxi#fZn)q1#er*T;kf zZ{3lsSCvBp7$tg|`_Noqd9AFb!!IfSjgs%?kRggJRjzD$)9uRk>@K*ik=mR^WxGR+ z+=34m_XAH!%H#P76|6sVh5hWVFl^@8#YA}m`JJnqUKaV=CFl*K-5|gm;v0LX!4f^K zlDdc7UI15>1Q8rNRUSZoG5;b<4ga1~c|xf*Sziw|Ul!9y5c_4Yait#jeO|ofY155a zup}9+<95mV^h~Mjt;9N&mHRa7cA0Dl$(mW|h)CxDfp@LJ1WT@g&k_3V2?OLl{O~I8 zFN+oaviRFy7IXY%v0=lnCxtM5<~=R>MvacLB&T9TVNDCO9#J6tAGFY&6!o7Zmgs0# zR#@n>5NWdhTh?>{asL0Nm~Vp5b6ZS|0H(|T8!#OM69W|m8TB~|(!T@KIbXfTrxr)x z(oqQriQc^B`lz~wfTWu^{l%tp8^`}erkl2_o4a(Jq6H_M@tS@82bPZf7nZ(5)x_>yhA~;>dC+E$L|6Ze zq*pvh_uPHQHiQRRpOEUSX+xIU8+H{EC&>Ur>!mpBLUiDo@(O42c9aiCFQzWunF)lx z{C2(j2Z`T5pcjXZ{4FJ)wofWj%V6~QI=Wzd4+SvE;6q>Pe59+r!=fLi83j!U2^$?o zt_N>Om5)0r2oSS7oJqU8+(y3keK995L({AxG&VklVdzk{J5N%DO}zU{Z4#}T#GxE& z1S;WVAR_3UtJCKUZF7`&Dv`j+%J;_=wPTt18&%{w4zI8vLAdCJRVD95pYN!Rt02)! 
z&sqLG=;Vpo*h;ctI(MwW^Rpmj+{j8o6o)@pHzFKmBcSUTyVu5@HRu{VH?}HvF?V(E zCUp=_npA3TCX1Dj^V-=j0yJMIHY|{M=?0EFix47#YzfB4LVEOB?V#XPHVvNk_7Xq- z82UCD3q0xhGV1tY_eQW{ct%XpNwToZEIij7-^ou}YaL5D%B^TG476vA2EVv!_EUcw z->9VqWYs$q;~eN)omPu5tWi?idC4Z-c1 ze6j%U0`V9FKSlelpOUB{&uw)E5*)r4qBO=OCL-9CB2huB_OCmJ$4siBE58@Plywj zqCaGB7IitHL90>_lNHon_S{NLgJnG&){m|Y>^R^y^Mcn)GV1n)_%7PQ8#(|J6~DO*b{wyHdHbrrBl1In3BFV|}=gx+OeqnQgt0W;OidIm5SvsEXnG zC(+IRP+|4Pbquk$nu=qaMb!=0nO}jZ9ctx$*EPqr3y_t7?~0DkQ|&8AAJ`_+#DPJ< zu@@1uhfb%Nx0m%TX$E)dU+3D-hNE5Ux5s|`K{^9%7(_3K`gpQqztQYc`K&5^yP$2z zI*f?0SX3AsQ^md$zlyFow3G=5oYSls$;&Ud7ISCJWKrlo>aM8O*(ezPFpgbA`0=aY z3~Fb5rOXNdAo(2Fgnm1#%5=FVVtx zcEUB8F@cJ%#T~)Oh6GizB0vtma(RL!E_PVebVW?l?MMVhfY44{{m7Lca%s{yhLwm; z_1s$abjOgLEhqE<$z$!`g}wMdO6h&7@C+i>l!1&;8un^@X2-jh zvu`rdD9%c)hnXFCGH4QsfTNKH`NYAVXc?x9VaG^+r}((BrfP%e6dl<+P0$qYk`Wd` z8Y9_m7`MH>U7}4Eduj$3v#GOQVQo5ey8cG)Uq*`pdFs>P2fQb})Jaj`&aVPUlUVw_d@b!-#F7`6g_vDjU~w1m zOW2S}g^2vF%1jle{%AR9X0;fBAx7}7A{Wb%lIbeGrPLVsY&EYeS+k++s-$i=0z(l) z^gz^Xxd7(>9>@I?)~9191*I&(Y^`8&M_^{^hASM6w_`Nh{->FKPg>9A-F!q9{SK5! zmb0OxlfTxppOi{FEvjOkI51;QWM=4)Czo4a3J@HcP4?J2M2>ZNISqqHIoV{oCZ< zBrPD>#M-@#C@iU9+EGW>U-Rn!UFPyPwX3}utC%psDBb}1!M9f)5p}lfFRvHwga512 z7;5?64gS|_miBkz2z)?L%5n4uWs}Fk?PRJ!XDZqq;xsGrM>N^n;SwMdUA+D|e`coS zr$0!eR0>(GQF_=C#mxPEUft-o6dU5m~7@5QQ1DyOW$7@FN0B5Ki$3& z{oiC7b!MuT!~Ki`p9q#KDsbEG9!K}Zm)M4om|FBeKJ~vgxv|95{!j0`lG|VYT!mW8 z5C?EhcR?r7CF1YJo^YBA9zE3A4QZh`>p20QCn6q9iXo+j7WB*YySdGYj<|v_f!nlL9+TEiu z^6>h*;14F#1P(xmauSV#vU5S) zTAf|97PzxMW*ImkVI%aqQH#l;1~mCA7e1}3cxPr-ez%1FQwuANjLjLO$w#nRl_Vpa zZ~UTe956kS#>!9X(;OdRZ0unfv|h|zQ+mdX7AYHgas-IDhsEH*>{84w^EF#Ki44nc zN^}{nj%W}I$6fpHIi)Y7LP+*-StRjrkQe1UHqFeDewphs=@EHy)4utlg_`sJ*nwt@ zqlAf8kptY_7NqNy(Pui+UL`rgnyMKr|GN_!4?;9_oRf<@WteM{26nU*CX31k6PcH3 z^bhYQx6p(xctwi|Oy9EtPN5xOI2pM8z>UfWU)0{Ry+%qIem9#*CF9^wi?#4++vH9~wYYb@Z=NqCId18F5-PSNT2B9tARedGsV4t4 zQ+^JRm=BD_p~+!bQ=2qp;JNu?to1g~Y*Md8XA)Zi2w zUmBlOPes?IwXv%N)dp^)=Cx|9`p2ea2!g(@InOvcFC@35i&ul7W{Xb@S`F3TfY>!MziuubQ4p^hB|jK#5TFo)3R4{!N(Et z@|eqw$ki>~1$uTe*Hxr?Z9>UVdxFEehdasY7V{&lnYKlIe4%rkCi%~3U(wKv#{ChK zmJL;*3mPiL!8`6BKM_;d=>|Qi)vZV3ySg`wZM758oFZYT3D%v2G>eP51%xm;y{Lr9 zlg4Dr4#7bPl_pm@&k6AJO(9JJqxn~3+poGEgiN(W#jclRt3F|Pme>*%ewsq!`_XKd zoXbZOxoQDy+(w7n?MSaCQ-*Jz_msyXtaIku77_zvITIvZ zv9a40##=F31QGV_qyvei&zYXBn*w|~1(fYR4-pf6o73P_!e<|GMSY2;gpXpcFA<2^ zMGpeWA)&$n1*lY8aVIRSos7l;ca*g#E|p`YJxp*tPXs?G5|7(d#pvDy2Kz~%_lrWP zG<*8UvDp%ft4ZU z!;}niv(z5wCKk)#9NH~M`#(Xb>RXqGR^ez%6X5eH9G#z^6mts z%8s^X<)D;x9F$j{ND>$O!mlMLqF-Ojwk;<(w5y*362QmG(P|B4j^^9US-<%QlZ3q) zJDpLUM zl>a(pYZ~kIZI!U-=B|bYJoDa#CT!X(_P9LQ#$ML>m^}yM*t+*sdo=q{!m_LfNDW+F z3;6555fUbGn&aGwg^JshujXNMVdo9BRA9P7E}xDRQ-H4tI5%^!K^=3B>gV;XbR2fa z*=+1?F(ZIKNI^WZzbS;53XDBMg=lg)lv|wqyiKnKw{#S`(^v&!FyrjVe*n8CkJ}F7 z4zpa`H*getnP;%VZvPR>g3n6)W@Ty;#$W5C!?3VpyTSXirp|bh^A1&U)96wv2#MAMox4D{ZS^Ta0~-p`>M{n^LqZDdt}Cdqj&+gIt-qDm z?j5V?xuM${I5n^wp*=es)s2#@g7_&s&=yadPVGoD#Nn($Iw;9Nb_RWXNe(=H#MQmlJ+>#o9qbPJ9NZ+Tt;j7 z40w5;WU{}0yZu2M**xaU+7OU17RPAY-C!e2-N}R9vvPdBk|Rn);ErB{Cr#^0zU1KD zJD+asxkj8LH|hs|^MoZ!DlM!sO6Y#hJc)7=u1zi#EADtuvE0W1?xF1?5(<~>hf3iW z$_-d8*q${kTnW6e(%*86A6sN|52k8A$xd9vABdH9jICrJWaA>eaVl9}i{Iwv>ltQ&$k`Hc&I2 z#ElS?GM*WLS?8`GwZmoS{N5V$elAvr>o_ze_sc+)kRH{SOL3oWs$yjTH}mHu8fo^B zpfiCB-gz`E?)g+!iWfPqA@*bHaRL|U)|8(Yo-lxjReNN_j?(y3stmkF#E&K0)*~om zT5YADsZq?5uwP>#kQZ7rTWI2U(qDPipWp>4PM|*nXh_jkw-GP;M&ZrqnPR(E1%-Z8 zuh%8fVwkoaNPh7KD9hU3@48p+uSKu44lLpqw1yjfpF4Tv#yWT5-4WMtcSOWO+8%?r zW-9KQ1D7l@g#+F#rtc5@K>{~TCp|5A!qV)k&Q5BajDAx|tSUsxyQ-RKtUp@|A})GiU}m^$Uhqzv1grP+^rV6YBg zn>fx4?+*mtI{!iH-!4I{-$qb9WZ3F)4hIh(1S5`7y=~z7slXS;fu>A#%L?@E(@+(~ 
z1j^8x*&H)T@qB$(bIp%c(()10vWZ3LDcQt0gg_7J@jo62JL3$e;S8wF^7)xvl#w+G zsjZ&7R$lF|G$Z1MTo5rbS>?xQDYEY0cxh6e_jpDkHd^R?G0AO-l~;+WZ?n+5RAH z)onVd{66#7P*BKzLoZUvM+#f?35wc}4RU>N?&M2hf<-M_=K|(F>)iCOHFoE503vWY zQDma1-OsB6v}>F(Gfm_ac$9j4UHx!Z>(C@_1O&YnC^Z)cJO}AmW4~(S3}BpBq7m%( zhL{^5f^KV=Tu;+Bcb-tsj$%f6)e;PNI;eB5vJ9_sDioZxQv7XX_*V0}~xrID!+FiS; zE0aGfPFsc=CLkr2q)ub6Au=dZx!h7?>9<`!d&e3}4l@e=jo z%*#iH^DjXbKk25qp<=&Qwl~&c)(+%^_b%2?;S9m+e(uo`6P-wh?Fh?|_$K^Fs;e#m zt=;+eN+U~3b71JRLJ_v_%}+2<97Wd{^d(d|@QGBw9K~s+C{k#twnY?dgL7rbP$u=c(W!;-)HmUDT%84`UQG!GZ zi_aS6dyu!D;}&{0$!Xx%5!1XhcYFk{kiQa7LNv65zb0c>2#oLYr}FT$`l-d{fhEEV zzk#+>MZvr&!CS&d-F=dC_fJsk>TIcYNxYQXAoZb_Ycg{b zqJ8g)`FWT!Xh<=hU^!=oZ>S76k*79(FneJiSFYv4B0ot>lMf+AZpwVU=(m@DkoE%? z^2c+t!H5V9@{57`OUEF@42Y}XbGeyVwd1jJpQ2f=lbNFihrQDJxWE6-b%_R#)z{B zlDd5*2=cAM%`|7W=IWWXv~E!mHMou*CT7yN43PR}qiFnB0EPH8)Ouz@RM|Sc)%x}f z4NoS6FAt{hEON0mxVDU@>kY<{r>(&QBoN7yA($=M+SiauzM?#~2gW9Ol2Ep)1|ce5 zm1l+isXVZv&`sviR&N|+Nn(8Vy^m`M=ZfJ|e9&Bm z(b}`FpZTlAzK0SPv|A|`eg**2?vtuoD+fjGsJWX|sy4&OwVK3+c_CKjZdj5(sdB>I zQ)*$>E@cNEi1~QsNS$=dC!%coAp(Niqk#c?tCh`Seu;&*(S8xAtj zuUH`6y)=?;T1c*gJo`+-Q)d=15JHV)iF&yZ!?O;LfqE4>e;TN6f zAWwcg5f~F;gK!B!)PS+_(K&xeSS%3v!LoeLeo@GIT33Q1|3R-%fTTfX0*y6FBys2JQ|D7n+6iQ7>kl^;z+?#@PSy5Xn` z!RAl99*I@!J7av_-RQSRqwAo5Fn>-9;QouQ zP7`1vZK}#3IO7~D|Dq>xgK}NubUjKB;GAY}VB6fu&DYcqO#fQ$9NnfrmRsUX5{IpM zUv36qJ!Z7Y%`7Lz>wf6{R9}r!^G>^3simuoQ*orr<6-h3->KVC-8X3$N|+~@58Uq2 zL72;#+_Q#OKG~H}K9j+JOZ3i@`os@`_>_kW=`5`l)e9hg%&UH6J0%mf5cL2(X?h}j z+`SlK2R1brbm*w;9d@AvN{pq`=y@v_TeK~lYfrY8FNoSjZdj$vY$hc;l$7@Pi z3WyI2YJ%xrx3RG+7nuP0#OfL6Z=P(rZ?LA^s@;YLWCuk(lT@{T-{o-o31!IYaagm{ zw$IVBGq25kEo0u8|5`>z8Lu*p$&U_8ovGuNai=_IeeEt=!yS}eMJ8^1Q${2HYQKv7 z(Oxn{`@+ubzVKFLJW2_I#eu`Od6m8%I45%abwhaGH^1wxcRf1?U!@ii6wVSkdPvyK za7~O~c&+ssvEw23SAX}+cIOwZr)4w@pXcinT56B<6>YvUTCZ~fLK@uo^{!!L$W954usjBG4PSkq8w>iQqV_V0fZ{X8>8rQ@8^ zQdxO^FJ-ZH9XY6QM3@v=`{Ppm!42Rn{|G6FK2X90YRG+v1 zhE#|qThArBx*A6^D4qd0SmO4Z(Wd*Z3{h<3!WohUPm5N0xiIGI69rEp*b=`x3UmA3 z$3%)3-(>ihiR@xGq>?8jU#W5ige_K)&}ZfjgoDITr>Q zi#NArSO_nMyXtY9&i250HmQ@MuNJs2Fw%q^J8)fzZ=`NPUyqCv@@Q%v%%7z*+<7gW zdx~m$u0rn*lEAe-%>q?a3}OiT_Q8g(PL{|YFXh-cI;~x;`wKz%0^GNGY3XCL42z)~ zq3)fxQDXRl(shB7;((PDVJ(!S9i_q?oo{BVM{kX&^MRdf1*4W~P2M>}WqzyuryVmI zXXPcozf)cXY-|-@l zfvy!3XoQl+!P(=qA5EQdD%U`6pH9s>Lg+rIpM@p@&8sH=0SG=rLK8#osVK1{`&aV+ zX&YoWf@j;S9Jc_+QRe{<~`s13^l4s|`c=3HgPfhlL=(wRze&;-LZASL;67wOXYVuOo@S^_4 z)Wmx>>y;bc@pY2gKfr{7gGs?Wo;k4QLRda{1_LC05S)dQ;?vTfR-R^Xti&t|g)GcZA zaxuA1+)1ir;(%?7V3l0=8o)!a%RuQeY;)G6y^7ZxHIde9Y6}*x0v_pt4B6pq=oVT!W-#lkFbBW#+3k->V&)%=I6T|E+roq(03a^DxISn;_rH z0M|BY1C$}F3~Mqs`x!$6sY~-Ayqhb#d!}WY(d(J;A;3LG5Yt|BneUmfT}65G!5EaV z*RW3GGZthK%A(W9Hbdmrvqm*v6`NTkQ13oBQ`U)dFZf=|D!)qYv ze=#4hcb zGo3gE6Gz<)$N=fZV81$X=idwuFLu{^YR9V!_fqreB zT;&8l+z0d7FzQxab(jDfZdIwP^<<_oxaFAwPw?X?tjeXW=iQ@29u3)txTP`o0>1v7 zF?9#dn(QH_v)|kpHmXV_jZ|YrCDw?ge9ia0h7{ELLlqfcvpuZkZ!QyWUcu=dN(FDv z`x$@1!|HryEOq(k@PK+#VIsmoXF^J=;Bp!z92(k&tPvLr3x2$WFQ19A%wiM4A2lS< zY{kc5jyytf6L_oV3VMhQ@Q4@0;T&{j0q(nx6G*w?z&Tbi#$B@Cu*OuIiXpyZ$`H7*h5j} z+}FlVl9B3pDC0w*!XGHpS-`|Cf}AwwRe6o|H3<2y9O8v~y^M#_HWgIJrO)dP9H}K;FUeKq>`J!lHI86j?jZ3UUGW(s-4YD%Cm|R z6s2@wZhvz}3MmjKMGao}kiFbXDow$wYqu=I4+4F691NpM zc3AG^XG51q?AF~m5_qF3-&0l)Zn2e`6Wn0XzYzp=g0084=TXY0?cE;XpJ*g!PF$mXL} z46o5*25ULQblNm<=|Q6KBng17`z6y{ubrZiZ7jEvkDq@Y12cnoH=zh5D8xpSM?{QP zeb7i~WI89-kz3#}9f4`g+3*7(M=gWz7QV~KWwSg<8AX=D!`_-YZfkAL4!opMWgPBl zze=pcQTBZOt@)VkgndiLf%BFreFBgZu5JE}7sIu>`0`4icOm>#EQ0i#AW~g0e0g{D zRw?p;A^mvYF7je)Tjfc~&jiVN_!Ss4YpC1m`S{bwpua5~GO=So>+TGTRbg1qP_6{& 
z{-%1fi|bx=rJ-DLLWG7&8=<$T|HHMy#p~|?K=cEBAZCnFC0KP>KK>7wQoMoM*Adyr>?GjaWkT9`Mi)ePR2kM>oU>1H{t$vV%1XgJTG{Gwz_yiB>N4x#Wte# zRP~yd^SHx}fT4g52zgV$IfDBYs+9iXv6aRDdd^BBu2U75g9hCjv-u_OXSuL}oGIYQ zYK_nzwY}~f^9+`y4db?MF2D!n86p>;aI-C{Xtq%a>sVBb%R~yTXVrVRf`O4Hi7y@F zj5Apcjb2N~UgYzI*E~dY#xo)5303Lx&<2S0?~t%_T1S z??UN{!j7Q%C-N~F^p-`bSuv247$9yp_(N>`TUHufm?sM_Jq6bFz)#+Q`%9UeEY~l5 zFtdU4&u7-EzX0|)RkDUkA`LUfKxzd&QeQw=r?$#DimAri>88C0Wj!l=UhZ~l zi|09J*OiKu10~}8Oe>fIgYDFGsu2-rclbk-)Mut+q=Hlgd1W+QRR&k22S6{??R@57 z(l?O~9w=!w_W-IfuGU=Zgsv_To-8FX*H8Yg&qJ&u;@WP!uJD`6imyxq1z#3?z2DTU zGK|%HqAeS>MRJ#!g;*^Y!ikiQq9>rRC=WoY!F7k?JC~OoUnkF1?)BPS@?Naq2Ik<~ zqKZ8EQXLJAc8xD&w6dNdX*%0LU)48GW53xVuElme-Ql9$9QdfNNP%K{t!X%quG@{r z;*I8`_{#&J*@D4Xj)tTz;dj$q^ugl?K@I6c`g4~?T61I!9iU2PpXM(`DLQK7jA-N5 zv43H-mINnia8s5IswP?k;T(Rnsr=#Y>r8(hxwY@`L=y*T9}|e5wL`3)nW>J_wn?VI zkmj1dpo~EF?2Bc;9(a6J|Nb!)lb;sf6}Nk$G=g5EOJ!Hu%`Rsk7;1 zPVr_RUkJH^B2T~7?bF}k$c2LCzB$UevatUC_C{Gxg}QdfDj7Yu12+=|(uK)jHH$xl zYfY2tf~|ObIt#3kCU|`2>FO9XM821mxN(o5a~SPy=dNc}dQjEc^Vqh9 zMU0l^_;RNQ@RNs7dVQ=`QjT9ng%^vhYWR(!vKsUOEhj2o`h%FTmbMuOa<1Xh;RUql_J_qW4{H6pq_ZU%zT`X-U9Vmdk_uWyQ0-lxntH^~9Yf?*ldeH-eH!IBeYk z<724*Z#4dYYNz~o+<4MHXV8X1b)QpnYO;&zopb-hKfEsbEqFFrRP|-|M*1P>`iox^RSn^ zjFgo>ff#EzJhnhlq$Io`c{T5r*du5jxUXZEKtr+9>HRt#(Q5C>w}d2%c`|m z2yq#?0v*UTber`-ZrCe2NFM9BSvcy^L~JNgGCT8&X}_Btcm0vUIz)brYP49(kV98P ziXhQcv2{ozKk2s#prU#f+XpONUlCQ9NgWNNkPqsE{l3=4(*e)v+S;biVir!NdlQxv zaRe**9R+1shQMucF9;ro9>Bp1rcNH#ba&*;R;X%NbJ%VONYFhL+?1w;Wgd$nnRBLJ z*UEn;*K|@^u%OvXM_Kx*x31Hdf3{@l1Y!g(^a1rls*_oX4|@-1O+5KJ!VE@ZR~`{s zV#;g?JP4;-_8>-C?k9NJ)dnYYYLx?LC3K=wp{zzA0{G}X&7ZMPfXeoIIlXeVDA%Z- zD1NV4hrBByJy$mSoeLS-xSIfhJMhaI)zm3I{VJ`IL+gW9YtLE(-A2;^x-B1yrwa<* zcaXyF>Go-r8KACQTv_}u0i2rLgJ+RS4!(fJAP0$c>_}t~H z`^0DZOE`ywZXJt=PDXiks&^=Fka8~Kz&C$oKWb(STQzbgt}TT(-1gWU1m#P}45~H{ zj$s_asDA!M6!r&&dnb<>ei6m*HP;XpTJoOo!t(kfr-&nNC&vB31A^u0yM+=;ic5;85kn0KlLk9u20-ERZvRWxgEWa=|%Dt@fTQnsQHg{ z9uv&Wy}(2XsE_901g?jr-Nn2jktdqNPt?OlH%{q=AVF84!^!3sk&&NY;`SAJ2kVy` zI=*&sepsERu~1ee9%8G>)L%pqL02-Es!nZhMInquZUs|gY&7)y-cJRrZf;0By6`mQ zY9)$1-w&u;TY3r~t&LQ@axvLN$F0$~;8*eVZgKMksMyx?_VjH@zWZCluI3v?7X|@x z=@ONjx@W0Bjr?PS?*AxC2YsO^K?b|=*FHgrRba#nN3@h=Z`GVyrK3mDzTT8(Ya0~3 z_Ku-r_I`Qu6kz9!CDpv#k>X+wWpV2o#}l`Wm7=HKJq+lboxA#D&GtdMtk5u_j>j)C z&r8H5u$zY^X*)-(jjeww4*IyEQ^uow4)A}`Y9x;o?_4H^ z+17}NCOO9^^go5-``flfl=PgA$m_d+j)S*!IEGW*YV_CMGKOgo$g_%XuNK<5v?&bW2t5EpZBvfNGq{YtOPrm7XaA)JFF_bG81lC|xa4ksJIOZlnk%60qMM3eAu~#L-VO&;x zh2mCRW9vJ0+PzMqD!%c{*#g2u9t(L1V|DwQ1o;R0LbdDMe0bCg!;)L|XXXPqRx64` z*Xik%TbJBbVrXc5QmMX(Rs?~@n0S1xoe=)KpRbFM#S*{KMDRY&f@5Yi`<1jknzYjL z>nCOIz8#{&nlI--)Edv}^+l@Xm&Sq=Ae736HJ`P7&C$00MeoeBsqNwze9gr8UwH8g zm}B#Q*JGgHCj+P*!=^0;z*IvwE@E=D-yhZ0Py6vB(BjpW(nLmUL$Vd}bzoyTK%F6j z85XxdMUfb6E>&_c!*lPw;uA3CH*k-u-Q?K0K}wBSl^Z>MEG_GMba^Pl&d;#_$EoA_ zH_c0N%UjRY4#gatQ1xRv57g{*`%V;7d-q?cGz7EAQ35p#Zl{W0sQaT5Sv5PTL*dt{ zCVc4(==6ud2CDa!{mj#oOU(E-OR})6Y!UI^I|ylp8f<_isDy7RfIB>0E`KNUL3nR+ zxx6=8H46R_C!u5xpyXU>462yh!XMTBB2tm985fbd$C4^!Rzjwqxd(PmgAI8e@Z}es zvt?G3dGov@UDS*oza;RItaFE-d=7SI)nBVwv2V_}KV*^p#hP@hsC3e-+fY7S>RyZ6 z;#dd5JHnl07w$s~ds_Gm&AhRE(}lI4Y9cSMc-0WBk}>2fSt7vq878L#eB+p~E{#(f z2u8iC*_97Z)3~%UKF$=!?h4oJ#SF>&H<^6=MYPnl2O&#dho29zYUYlCMI;n~4oFah z=5e~E1s%$)U1~_bbn@OhEo5H9`b!W)u z)&N;2?)^T5@w6>s^-x$mudrm;C+nM+W>sWn+PhjJf@BOinau@z_&L`vqQ(ci0=yG? 
zH&S*o>KYEFQITnl#i+uTH3`#;b;y(Vog1swpz~}^m<8uPGA7G1>9Dl_XYRw(O-NLF zO5TyzM0T=>#~Pz2gbSD&>K{rh4J#@k7>OOun)I6i4<5RidWXD8t(p2>iQBntx>;VT z?FU$?`MPnyMj?}vEi=|(NS5@dNc=;H$T{H1%H1Je0P&MGoughonj4@yRgnN!Zy^WW%@<@pKY&+Qoe2zQc+>ZIW60jP zar`Vf)0uz(&!)|0DnPi9g3lc7!^?U+Y9$It*zy1iL6wpAK*S~1b6VvYf234IB?E@- zBk^;xlc^3i!y@MZm6_W&oPeD0x92rE+zK=rrRhn1>qe8l^-Zq+^`TBA<)PLu{8bv@%n6|P#%s!D@DIbDNi z&Vm{YV}GmJzwfG?#HY`+rUs{(O zx2Nld!Z>-9SUEdUojX`D9_#=nRjZYh!n`Ty>Ki5N4p=TrzO3`@S$qv>%UHL1#>X5f zzU6a8JLid@V*=idL$yv)sL5PP=M!Y~?X+9Qw0!f*Xrw`;D8LIg!!++U7>;;2*(6k~G+?TxAkxMHmxxH9O8rrZK-u~r5CMc? zai;_W&RK{U?cAg-)(l{j*4x#DEv`QFI9TXjzW$ivj-UA&X;y2fS$8ULVF~24=zSGZ@7DS}t*(XdkTJRW3$5V>?w|zmS@n zF^7KrptN*zQvw`qH& z-O#w?rvMxlA5f%|ik8jptNh}*4;#m#S5jyz_k$i%h0OFl+NdrX6K5=BgUo_}B#X&Oq5c9P*}e0<7!_ux(Li4HJ+N}@>LJDeiACC^!$pqr6u zSCCL1qSIhi(mUQqYxJ#IDT@^fC$qDyebs+zvKYA1cl)%1*a>m)m~-gL=JI2VxhJCP zeiiW6B?k_#$rBS1vV}*1(%ahPX6$N}ncHZ-N9lfh2GZQtH`}J%PE^Oi)%g2S8l$86 zk+MMc_CFiX73e)@9`W~aGQ3rkxcz+ZHR2g5pG(^r<@hq-|QIzH$i2|5-UitmJnzPitTJHu%;Wjw^!-L5aFZV zBJiKU{{Z3tubAL|dlV<}@RH+iZs$+-KDj=5_IoEb5qa3SYhbdlrPXf8(DYck1VU9KRJPbGekZsRT^F_8!%6Glz zDLKdAf8r<(O}vaZ=DacX#m#Yc$dg(Yi&a<^qx!&L9;uN(*uNIO7Mj#mQT%F#(?Dv7 zr?N{6tgnPhsDJm|g?6%yL#Tz>ukhp4VQkAR0^-BeMC?8vK5q6w{sH#b>jToOrifYaEzLUfU(C6iLc6E8DZ7>b@$*Ex?B)w5TY5il)Y>%06&!l8{Zc1V+I%A4g zMsvSUq)rXt{53YRn~Et#_Ivs`rP&w1=~Djfx{uDr2vdv`)Gu$b{G>WhK zOK#kbN6}GSt$AUhM?MJ9yq5oLnn{koEvC@&Y=g6ja1>jZdf%yvgKGhL+xZ>SWU`1` z+5V3^)m?J+v-D;~jEd$^6OF=+@o5^UY+)XR(OZV?LgQe%vIl_u0tmUc+h2^XL^RUvP<5eq%sScE(y6Nln&y}6_K{@o^RW6SrJNkShSbt*TkwX}MRJY&`LNPo{t{7<(_Hqv7hIMkp{ zbRM0Fg$SHfqASEYk6BwcHc@>F^)!JX1Y?N~J8!BKYl9-1Q&@V8TQ$=;R%S?zzH3s> zv&DBGR*iKZs;hk}ilJXjsF#(f>|#mkVG*rs^Gnp;Q)&t}N_0d9bm$(Rhs;&!CYIX0 zwBQzpH`B@+81}p6+Bzom<%H84513N&eevt{3bmCs0q0evVQQ}qlqad=$_KmH#P;n* zK`O+9i(c7>0-x_rw5!dHyK5sUdf7iln3z5rNP^~5wYyzPnrwBA|0G-dyC#BiD`I@N~e+huyw5>R&*%Ut+9U18@sg*lr7Q{gWl z{t|DT+l-#HPn!w8X9AtS!Es}}@rC#(zDb7O=B$hFGQP@9X2O$TJB|!P9FVJHKqrxc zp!9h-<7#aYnoK7F`+gr7!a9v|JTAz#RGD44q<^BK<+c``>KJ|6LCH4>21{SYy#qHb zfY14*pm>rL|Df$N!Iowmy3fG}CSuO$grKAcpPkToIhMxOJWZ(Gk?)e%=og{TLnd3}e$EC!|5}}l$dY_w&4Q&Kuw9;_H1~@@q9WqEL6JgcH0>4_ZyG1# zq*;2aqJk&=O-f$$Jt<0Jk<&43y240tP@$G7K5UmzvQdeaGQTEXhkZsPGW8qupsG z#km-#P;*6GjhX@HwI||pG z@AXC}1YigR2nPn30azF}TyvkayVh#@3dU7NcW&4*0dyj-;AwRxaB-K^f7m(d#n0}K zMY|N{M`}F9rx8rOQ!)$F+Ysx-KO3ucw=hp%lF z-|@{RWLr&1PB?;4xt4gIH^eKJ*A0KxlHZ#<;@<9~nK0Jb%_vMC=k~3G_@2X1rMn8E z$goR~#dmj##v-Ti0|_jKd(&Oz8J}Y$nHp!4_1bGqlhxNRoq&&rG<&};cqtYoB;00N zyL|6~>DUGALET>iy?Rp_X=2`3#vo?DR&ILNHm;Rmwt$3v7XAC&cDLH?5w&zg`(*c0Kwi z_Iicm;#t_%h1IO@yCpC4$4{ery>3o+Ox>NB*o;Y3(nX~wr4Yx5P_1QU$^5hQs>ITS z?<@TcHXb$ghZD2Gl1}&0_%#70T8L^!m|IS~wv4o~zg018hJVq~p@cwtIGMW)=m>oW zTedKvwNK>W!oro;{XM@b_BHCdE^KPgcOFrCNP{iKZhD>gp0p&%kLAgm(Q{-K4f=WS zG(1m|Wwsc@m0#((gL!&IdC1BIeleLClK4danZqqmuNa1vnJAHB=Q+1 zHf%8JsQmZA`o7?+MB)fUgq)R}v9(`Bz4hC}QvNEwG7&4sdd4`!79{P<_f~*|*Ns|C z(p8Ib+zohP2&c3s$l8C!N<%1dw|O&;VnrV7d{E0MXS46o6SB(N?%9aT=fYu zaY&!(t*tOp;ao*O+^U3)!`k2kdX*D-mR9l7Y-#h;;K+l_4F~4Hh|Gdy&9R)rmv+by zCnh|zms#xuIvTRRZfIRBUb7~Gk0cmx3C~i)h#2ydd2FynsOXg(TZVNS0dDs&TB zi0H>1oSHz+5`ILS5|I1zPn!0f2YCZndGW@br{Suogu)`%xJh6L7u9L!$oYJS6@*@o z(J%`&RVDUbeP~Mnr>_JA2EJ~Bx~br=FoC!y>7)7iS-xo8#pJzLMA^<@%#1<+O%F{)M6G_cLV$5o%GW&fJR-PZ!N*~$hZ?Y?<^EolT zJlv0M$pEk_7~ve*oGQ&*3DM%ibu$kA0M8!90_2?iX-1f&lO_eM>-aA~HP21I201 zWzsMBO=^7%yCO}WISDA{dDRd(|@%9(Q?FzV*mFnkaYHRXt9+u>^-nar zdMXx^f2eF=GOD|ElG?Um2a({L61msS2DX2MzFQRhf^VhIf?hu=_cZH95$NDVxQOmy zyyR(kaltc}O_ov80?K|Q!hbHI#_CZ$o_3;9K8s>gqV0_3OC+Kgo#_^1yJq&Cv08LA zcR-3$1oFXCo+ll6MI7pC=g#U{g2dK zlxY?e6d?3COJAodADdq?rVY}4hh5o(F%~9@!Vh_Qy`9`k(N69xeRnzcsWRzHk+Otq 
z<(56-VBaeYz#;+TFLV`rgSTrOqxYYnYwBoTIXPsh`;}gcW*G^8tCpG3?Q#ErcRx!D zVK&?I9B@C#$C{!~>Ry&9xXWyAyUMv4VPQW!HXzEJ;rhfg2?I%=zO90^EXK+#$%rTl$Ur_OM2d?A1BCkR^^Wj=P|YO@>sV`=Qo}Yv^yU z3n^p~7TTj)r8cYs*K_63fsTH1%0PleZ#+t0oyjvu1~an`A=}a_uYlEGSJz*Cch^r@ zs%2u^xn{#)Wjk9>C~kzz4dpaCO0U^J5#N-HaS-E#O?eN{LlfaS7&+9}5I_?;gjEkP zvsJ!wOUHrNKP<&@^7X&tmz4~=}XK>4z$#5`SgTvYHAgE0T*<0r* zTSF3&43-tiyC3)9)R+`V(%4km_>h|2NkQ%5l8&DDjC&UFkf?Y+)X%mbbZ9$=*qH(q zx9?~=k&vJ1JPFTijk0MJJq#J-zSTRcW+WFHU@Xb%Y*MF^VwB3QoX-f@d5^Z`^k<>rh?Iv-DUi@~c7 zFFXEx-iE=+nwRE_7>Y=Ylel5xfZ6*2uj3fU@6UMLUwhS-KQeUO&JGwTg-1OAGvi7& zH_gf;2=hxwV)R?O^oY-XTh4LL5I?=D3fDqVM9c<__RIw{e z8J5U1KcP;^^NWc6tiQ5qiuBHYU~{nXQ{DVB49ESi+5HEG3i4&nK_VE^6 z7MQt1{wo3kk%p+Ur=H)Ora+~#*-5MJ7GiP@j-6CR$;GmjElw1gMQgo)Ci6hY5bs#K zaK2Y<7X1MP*}Hw1k{hG8?70SgdT@9w=PG|Bq1B!1{6y8W(iHs@CyEY$2~nBkr=8`I zbswXy9nhagAU0z&#XyG68%y-=Oyi{m{lGr#2C%^W5*xa7Gt<%U5jm)_)d8hHH8E9( zX%&v#N6{aYHu4In>h|qvARS%^GE$VlLee;QiZ1^Q&XB&oTiZ}JyS{@_u zLI-vsnSTgZzk^CUUf4!YY)bQHvdS#C)7Zd1`5hE*ZZdiF?K%6Nw8Iv7SJ4}m z6vN1)=^KnH;S@g_Ma3Z*Lozs%l5>{mrT1(GEijG(^E8N!RC(1C|8^OH*sq6XW!r@A zTrPgPY7{o#zSNQAF7i@0l{+ClIb3DFUw|kL`%bJp(xq`Rrq6J2d+r``peJ9$XhtN` zWYTCvz*iYo%`LheEBQe49uwti?Oe8A%I6zSyet-X5H2E%x^)PvgtwnWbsZMnW36&F z>kN(ru}#hn2sg8Z1;fa!(%j@a+I$I4Z&zu0t1Q{;1dXx*BW&;VCaZ~-!(*&p71ryl zDw-gWDX@2O+%LR3{c}=d7(8fgzNkK$<3uc)61N(q#*u|P+R*MFrJ@s(&%}k6Nj*4Q zH@)L^RR7*l?mLQNu0kJL)?2TAP}#ydak>APG_Do+qS!3AIYlZwun{ zR)|^9idCB55a6igBwL`P8kU3KXYTLIK7#)u+D7x7PaQ0k5yZE=`){uHZaXOqD+pwn zFM2;ulS}Kzh3~De_{t)oGDry|dbBRF;>`AiJc{iT`TOn_soLAmk zTu--1DW$UVi9oxc{I=#F0;+tJV4=?)cL^fxkBw)z+f2swuI+M2pcV5U;9uAYA$O0lyTDD$56zY7xOw-Rdr=V z^}X`TS4F>0o_MVI{qXH1=iq=JX2`!d|K667ck_MRFfLu&>s}9^4GAE6KYavvJEk41 zM*DRu#GuF#;6Va;L`Bd2L66xky6Nq_CRm5~o{03>?s^M2GsT-Y($H7@G9yVag8tZHA&%+#aL<8#Y@ z#$JuT;iL`7e710Dd0v*yU`6Ol-C`q3i{+8<>s024A4~XNh=L}tE*7-G%w8lZ+7WTZ z@n#!DSjCt^b+gS^)#|Ezlt0bWw zPG1xG{h?=Gn#sBu)C^yN7`nszGml;;T_P=wN4j)ayyqK3^L4VDhPQS8Y(QTaPSgRl z{ySfj`@mCDpzKI9Py_f1Qu$Fs{8D-0z_mDp1T=&y=O;#9|19mx+Dsl^k1Y$KHY59k z*|~j=k5&6@S>o+{-q2kYnpFo*1pYOTR{7>Bt6Eaqg+2Hh5>WHOW;g$Dv;9fNNRsO? 
zfh25;U_pcQG0P zkX^1>riXlm+>43KlKKw6G3OsaVthb5PEn~XulYH)c(!<{A2X3hQDo_`kpXi%n1$=~ z>8k*hVeNnvk~!y9h*B~ip30B6O_>|l+6pr?73CRM(Hziqc9D@*`&2BF!U;_3w;oFm zCYS@xN5QDH^8`hfZ5X91J|QYSt}#VvZ0hujCDVT3Sr>Hll)`}y4*B_+uH4Nq`j9~# zpWvL`p^~&;E(96|MnXe8vlSX%a1gA$*Ii3X`~-(KH!N(txhK47%qYX#!6G)MmPtm_ z8=_xPn3sR4p^A~R(VxBaMt@ao>I;$UQ=58KCAoP+(|~I}6;ADLjxG&-KAtIs%t{rl z&glyg@s@UuhxKgA&fV4*G3h+3=E2S}IX;G9nsQ&}u+~z+Ao>uI!PVzep1~jjlf`jX zQi+4frL2Cd8DKnA5sf(narad+Tg=j+Be+Nto1Ne3tUVGj0~8Z>;nVN}gf zhPs2&7?w9Ax0u{T$BmNw7tsL7!xNB4zc0_QG7R`=i-ILr8`lRqR@Rn|jfqT*g!2rg zw*!@NubyvGl<_!Safm?^v3l^te=;F3)>@Lj8P_2{<$5J{E#kiaAT^)m*L<-3{r2}H z2BJ_+RRevnih-`@;AT`h;uK(9XIv9Q;BKwy;x_2vsMEofQ?JP*n;{#it=3S@gcFgZ z{({oZFY69+_w^m;DoAB)45y?S-J)w%t6>n;$Vf!mg7z6n?JZ`PqVm$F;9dx+lfbU5 zY7kFtxMgb6M#(Dr&fZBhLx>Thc;Xf3eovv4nY#l#AT zh)b6~Wa1bnXh5m|H0FA8vu09^wESkXagljsVmMri+s8{jsN$xWUXn<%u$n0|$@5W~ z$2*5bQkaPZY!{znqGS_R?vjxVm!Tn&m(8KwkT5RX(On4ZBj>E`+`E7?9@RJVYX9Dv z0IkM?2dE>o#Pu?}1-^9|a7s_(rL=gSmWOO|?Y|vZX|W_~K&jb)`|^ulNMx?NK>*lx z>29SvZDMSi#g5n8*4fjOMvVbe?3_m5Z4%0-{J>}_D6}A9xi?)PLk(ImAS6CjS6LLA zsq7uP+3&7mjQR^NB24D=i0(DPdyI}gsE%bkAe+isHIs~TH8t8Q0o z-$f9pizeeY{cC1!n-6`#!mAYx#nb3GRTUaNFbip(>AS{CIUE!r7l?G=fnryPcuf6$2IwMU~>@r90J7t<1J-Dzr zre&wm8l-bJ9K#YPe*+I?AT8QFsDN0ps+gbpH`lw z^BvJr8>GjelX*#(oird!03 ztQi>`wf0$5(#x9!D!^Y0XfuBdYFu}|MPK*+z}2|2Zzyd$Ed2>uGX7$~h>?w;+vL*c z-4@pOBVP}}lbFqYMJcx$Q8T?uIqN{_$>3XFXY>@Mji9a_E%}_}`BjQsg|R+NNqAtq z;HxC_)QYQMZp)_N!4rlJ@&#&&ku#GjzWyMb{>Tk?=eVxiN>=}SKatMGNXetFT_2)N zY-Ykh7i8-7g~Y;hYZkti)Wk0q z{o|u$7E=ayj3M%E3Ocf)lGDG4%CDb=)JR3L6V5C@LEjZ0|aoSue`&-4`ih8u^WTL{zs zJg`NKd(purK)nT6V}%g!T_}n*o}Qk@PmI}%ICHc;t?PhZ8|?9QqiW8%!|7ms31dya zVW{mc@YZ^33L^K6r%5^{t%_3iXDZ^6!i}VzNJ?W?XAk*AX&>f34y#s;p%`vu)+%v4 zgM2^xFCzU~lRiU18?u7MJ7s8Lf_c`?Nqv{@EIUkf27p37arVEz|Xi-N+o9fbx>iVD%K$`fmPwfXD+SDB> zQIhx3`u6De3Y;6Si@jrgX#H%JxVRV&;(A5-N2G_WhF{Yq`7Gje2Z$SHp58pJTPzim zo;Lul6fSRpNvI&o*X#jb@b7@8jt-7q;E0k2ySRo&(_^_(Lk{L&?C-c6s(LA8+-Xv$ znqoKqGNVS=2+-WGpQ_7E>#^s+8qeIGVV+omHq*l5TD9pp?l>q%S(IZ95^XVvfeInOwmc~?&6!6 zBsr1p-CzM^-P>V98K}-`I3a-w*$@p@71@bH;N(^MRDa%*(&Du|-zE+bC|{w4<#9SA zvsj}VofFH1)DYDZWX!EWjTR3F=87)W*?zwDw7WIv2Zlo*NWx{zpP(worpwsHL^mvn zRi@Hm*m?eJ2_@ORp4-%EP05o^k--kEC}gxzokx+s?K~Cy2$i`{5oH*5)}y#T)jkMu zm2(}ruU8zCHDGaf!bnWV&Nkk{_ZLy%-`aA|(vM1@-;C;$=NCwyUnx9p@_t!TuW6$z%QatrQZp{VMnEw zYY!wm55dPCKNVj?^84z-D9W~jb1z7Z@F3?pKtzaBXv1XSSU}&RON2o{Sc}C$sP+Pi zfl1V}xdQ5ZSdxw_^fFItCH9LPcdq-V?@z$^j9*5!=ETqzSfWCEp-wc|r`-8D4!Olx zVDa~q!l{t{gTEsDGj2swN8ynBKa))it$Uln#N7#{{H;K_R7l_7lNCvKyB-uab-U4T z{Vk%76VC1Lu?Xc5GB5gj`4WjA+-lALSu~-V6GIzI?@r{sNgn z{g#w`1&_s2{q0@Fc1IkkOLK*>pvQNc?)!^FY$keoORQOzk`TSc?K)H!@qif1;HFD z@Tak{mDMvF52yd#dlN&Oc*gdPe{OD83etH@2a*OOr-Ud6t0k)W zXXg%o5%JX~_z#q3^SP7{2{lLr*i;w>ZR6|>?lZ#6+fvw*@wZ1u&9$>`T9+plM* z2Z))sv>hYH`WRbT%0G7IpF1S4RzyUO(Y^23+R`m}JcQWab< ztm~lKVF_zrA?skr5#9!=km0#Wue?bzo&DIfX4%yNSnU`Tfjk`8X$~In(9Ww$9~91Q z!~@20MPiJenlT29L&`)9ZzjjF#mH|Cl@>0?u}>2q#~Jj!N7Z4krW-$63;ll59lvGi zpDy{lYO(wKV-AXVP!`MMTYT{Pt~;ffc_)t6Ts5yQq8w;P@T#eQ3@raj4Z5hjreIHb z!_Yet#m;L-dBZIHqeQt<@S9{Ywv<<11RwpK8q!zp0>1<3UqIiqcBi-Qof2PseZBjY z`G@4-?zh}+ep7)^-bdr7KbT&_jS|j_ZsmA^jaXNOw(Ne=J+V$++v_-fE?E}*!{@Oc zOJakC`IeCyR&t@H+nePYg?SpN?uGsa|C!5RL+)!=L$J|IHWfxP)Nx^0pemcncG{^4`N1IK3xlmeJH+e;U+f#<7vFi^)$&Iud(|NFC&Cw zPEA2SD;@woeODU*))0W0_v1{^dE=ud=(?2h3MY5ojngj3*n83@NyY^hw{!LU!md~4 zDi2ad)B4xVVm_L9PDTsiM_*L@{=Qd37h6!SVlQx0O{Y};n91|_!}|5JHqqu5uTP(q zX1ZUWQta@LD0+EOJ*W!rn0`hP&aTpSFgB6vGsiT#!Zc=FYxgMiWy|8y7zRJN zUU>=f>P?Qw+OLx3U*Vh;2Yei7_v`dFmj|Y!Y?r&H1AAHm?7d6{g>BC-9UAamGQ}$#i-#GYI_>~~N@{6eXvrKdM1+`FsN_0`| zt>{uhZf@u9Sa2ZU@|i?fDd(oa$|g`K$}PaAsK&dLlc&bWK>e5ujlZHYI=uG{*Z6P( 
z3vazkruhVEU9`E7=bJ?7MQ-JpXyVSJxOQE&F00=#js57-kFsjT-ZOGyq;L3*ep(A5 zUw8qhb2OQ_ADdwLwySWZoX<_CdH z@-PxU@x3+K#68<_=l=cXBE>|_$TpY3Jqxq>zIm2^e7 z`%DRr_Zw++W2Nhh^L=YcY6qI3--y|Ql%4g`p0=C;goJQtGk78xp<2- zowC8I+FCku+}t?m458~gA~x2OmM;3pwz@%Mpbl&i@tSk=DAEiqG*JM67b0878&?zdGoX&H6 zOa_wC|DCO^8Er>iN66CtS})!O`uLsT+95DKpyF??D5!pE%R>C?e|4Y!%P$j4*b8jh z8a}Fme$JC{ePcuLzuYrR&o`Prl@h=IkbMrwUBkC};9C`Xz4ZS4ef(~I%XxmYKJW;g z3SukM6!Y$8)!#8~D~%dzU@-rkX1(om2_c2`P=gxE;&JnYNzJ<+O*U9FZs^}`oT_`7 zm2a$%lC6jGia>u3J+@_ol{MmO&NYq#i-LbP$Pb=U>v{M$ckl%N&k9M-c^;}IZC}D| zvbsDp_}GhN>hN{b`wm-3U&EJG5X%3TV|MWTMO5?;PX6_|?w_qu>JKm3vR<+jDGWA8CKen^9UFEg)Z~kNB*qG<$RW6H^#fu!!bfH+5pIlsJvRq0QZ^ z=;U33&A_d>mcfVZJIxw49|LQS-;D>fu|Eu(|CTxbIgI>9N=Suh6Y_#LK;QfECsMX6 zdeHk@dVB|?>4&=XuBWkwajJ-op@!v*oUe}zWpWUY%wnnYN@gyEyQ3GEKlvJBt>&&sIO9F6>q?q)qh@gS1maB`_fBawd?)momhTB8R_w< zIRi3GU^_ER0AQeGY=IP)*L1BCYs1XOKtvNVm$c+z`)l~%6~!sGuJ7Dtzlct4PQYL_ zEn_`S#i<}A;-~!32OJf}2!QI94^hk6%)DiMm9em5_`3YTsk54a1t72s5U}f?D<|+( z0*v&f&(8oMj6`BvG?G29NYY=qG!s# zh@6_Dpfppm{4yD-y^Ej!-H+rI)Z{$W6puFqCNkBM zey%DV*m$PP&`D;_^l!oqyk_*r-xLqfOt9`!Fv>h&WBzPrAbDQX?GnhnC(rnnow+Xj zb&Z7>VyNrkV{rBn;SzIV zLMLC#`bBh(d%pFlM(sF1Zp*}v?N%QX+lO+(=@Z{7oXN-kg0fE}$;XG-*?Xjm925oG ztCT%S#`7aCHllpl);=j)&<>92SUvnA;^61361^{#C>Z818 zSqJ`nMpNH!h&Cnm8=!eBXQxJjt!FH$5?7U-R!#Z8n0w2pIG&(y6ha6VAOsB>0>LG? zLju7)xCeK43xQxkg9mqKan}S5?u)y-yX4M7?EgIPhx_5Z=iGBky zRUKU#;eal#O8`c5=LW^5%)IoSq5AYf*lYSm*vamDU8b3 z=Q+)zH41Ig4J0cmk%!lBIO|o>(`3p@qZkATR}^2qt`f60ssI}y^f>j3&6rcOpwkO6 zh<@XJ(i@WiAz4+AUDfcv z<5)p1K3kUOTHx~CB7?xl{*sNADg%3mfThW(y4g-c6CB}$Pfi66!( zR4dRmAS?`eJHFH}FR@k90Z#cuA=iPEFcTt@Xw)zwmy`2cZ>W7>LyNcsR3#%fEZ_Du z&ky}gIU1N<>&(s2(P{2`GamTkyrJW#0ViAYpQ(TgmPl z7>tXap6Sis!Fd1!l2rcjF*inFxqM^3NlB$HN%ruSX*ILW37CJS>xoOap~NKzLU zXD|l^YJYnq*~l)X9aT%~pYDPzLWhFTkEqpJV?1Ry1aEg}$)~}zo2tAl#XiRs73VAz zruu=QlNBb>DX^?A3_WC69I#b8MlnhfKgPy^^XK3?ooIk4E;OCD&8%2H88 zuEA`88pX_a8V(=~@?WauEeDdFT$pL?s<_O2km>&kH8Nthk%4s{3Vno1+1Fb;J)2n^ zVKXjTV@5h%`P4!UfsNcbW?>WfruB}GZkFuUCR>3JG{L?5MMPhG|D-cX|J((j=q&7M zvVnVb)3v1So;M7$=Rna1guIaOdbK2ttyG`Y-sR|9t_&Lp@NsbAKTRr5FkYV}{7_dO z1>Msm%7dhH0!v{Ur_Z{vDZg((WMq*@K&?%-{1b}t1U2>p9YnZ~U#GiE?ee4a{M+vx3TvZ3lxI7@ckeg(JJT5X@Cr-CBR^x=`+6G*FIXJ5qZN*+Bm`A@?v zyLI3Gp6~P7{u%0nB_(3=TF2}|NE4G`r97J1ABPhVXc&!1r$GZAwYP)c`GsZ(iLV3}q!aF0=R(3T=@^cy>v0 z_S`L)<$W0oK5Ed(q{wYRl8Yfrl>Bae_hhItRh;T-%R}bfKKMoZt-kf4Oi&71XR^`L z7RtH|6Dz8;%Hpyv1L!J}=Jt>f?3=k~GqY^_R!FqJD>{HXk;VYK^6G0Y&h+VXe|HY&0hLEt7 z6ZE!~2G9<*FtXvSwaOg6P*(2DDUz7?2@-qwDJGn~CMOhPw@9{*DGpt^9ni86`+;hl zy!*np_AKJ>e7S6&$#we0wn|E(!wZH9@ITC_dtuG!%DJ+?*rrU-TI5!~nl`SRLrvk^ zJ@4B+X!_(?AfaOgG@f^ioyolOF&VI)rXRXbGz!InLc)WX3{E7zOo>ubztNk)r;~B4 zCO>|nz?U|-xA18X?pfk|oDBKrEqpb(<8_smf&oOv{vxY}#tOzYCCm-Q;Mh*^qg?qM;&=Da@`MUjC0 z*DxaVk?R!o!boEtRGmK6dImC24Kmn1u6t)@d-ZX)}vp3Sri>l>6CZTweUk5@>pPm_XQaiWSwT6 z0u3pLihUl6*7a8deJU`pA$Nye_{D;1ivvU%r{0FF0l#ouDN0gQj}Qw+Z=sqKrCb1FbAc zwPlz8WD(;(Qw89yJ?yu__Q#q4mz7mMrzsU#DGLK5`Yd-ZTPa~P^jf3zXj(m*F)oau z7!`dOtxTYL2Im4&kmC&CFkuqvn5meIf)y~*2`Jzn(z5A*aTNO^)hIm&1SQLXObm3wNApt=!F=gR;HgZtaTqyARKnTs)y~fG9q}%@ujLOd;N+SAz#yY;}Y%| z@y847T0Qmvzp?MMbdo-$6>+q6#;QTOC8^Gni-V&{)O};YtvQpM-In$YEPXjqU_7&d zkGOsaz*|K%HTF!dDo@%ECep)&y=U*?s5=n8dIx1;$c2A{S^&#e85GvFW&iM)J>>5k zsbU0g6VH?DAGR6hVfm3;kTp6EjElV$Si};byKs)m5-}7gR+#4~(tgLF2 z7qbtt`4(j5%JhkD!_#%`plBBFdlDQkM;%MI7jgj>8kfiHgx4*FAPz#In!-`f(p-qU7?vv~`BwHzqu$kzr!Jc-Hw*Q}r zTGdx{6SO3VI=5ge5;v`aQWC2OISm zQH_uVU%2@Qko_G{YIInx0>h4VdHFi|vd{*VAXCI&p@q+>8E>-gaNf!}86Ao3#C!86>%B;p#MOTMM%6Q&)4mYN zRdp>^(pYz;05*M(g5h$3U0?!X9Lb6q>b_v()ewqI`~4Q{y~Xk^P&WDn@^t(Ui^9ym zbzgQyp7}pDytCB1Qx>d5L20qwM9o4t2by6kV9`30t(LvSfskR3@X`LnE%O6Q9UE@+ 
z%ZPKreYc|v8mWRv=ZO=wkqkid5r%PBXK=vWl3esa17f?KZckPZI9y*ydRUVht1El;qEpGlPZ6KZ$fCiHHg z{hHirbCNIg6oeb*_h_k5c++ zpp2^52v$mLiY^jx<=~mi^;$KumNPmf@Z0dS`t-`~p5-V4Qg_^KC17SV1M|S)V+Cvu z?Hj=q6YBe%6%Xwm3z|SOk6^g)7Qa>oo|CU5$tZ9>N=Zy1XNg8wU1l2uD}E5V*tk%2 zf#V;+h&ML?;goYkAjcLKlNo zJf*`VFG3Pz7Zv+?_vwUrCyuhnNfx7F!qRaiD|1c92wHfO_&gr!8@M6J8uHGPY zKLw3P{Q(vc4{PYV@=z{v%1)n<-rC*p8hdgZ)*nkc<#W6hACpz1bPP8F5&pt=X>%<1HPPL)GiJ*dj+GRx1@Z za?UeyHjpk7j&gH5I)}>xM;dM53hPygPY~JR@uT!^gg$!si04q`Jx4duw2CL>NDEBS zuL`ue=LDkBtQ~Bm^Kmvb6}0F~zM2q0<)#PuwzrQzxX(|c{id6M;;=cS5Y7fPtfLq6 zt=t$1M`FbXjz$Mk+UCm~v|(jtHZ*=JtMQ5T4JqvA9Bo#lxtnN6=``B92=yiY*Dxs++TSJmH({%0TJtMrhc1|#Srp5ln`idQV`)g zKwP!yyWY2?8h$~p9iG<=LCPcUq)E4e=Wkf$r2IaC2KG3W{?GI|za#ZQ4aoQ`6t0Z~PEvJ&Lc z!mQuuHW6z@C-+_!)RE{LmBT^XeX6DkO1JLG?)BGcq@mm5#^HA=6EfFEf>-8mM*m?n z5aQ``e}nZT!3;-s*~i;h#echBI$ii3_S9PD3J2PIiRB>5_H|XS-eOjM&Lo+?S>#@jWD<(Jd%L<$B}Q2*73czf65EN0BGOvy<4r@X4S&cV zmT18@`kOqq6UWPgl(_R=8areo8=*`H9TL32Yk_dACqORpb%g6L@twHQkQ*9eu5jLe zCi91V_Av!mt@l5q^*BzEoe|!nx~uCtTf!E+gBxbZpwQU(09IYl9iR) zpib-BWFRj((?(=RZhikIp)MG@Sv|Fb$~2w1iI%hB6IMbN&V9FhzAs#~2=wZvf}eb) z0CFqt{kCygDad&Fe*U+na=Yg*?|qz$<5FG7>tlH?1Fx0>l?96VIutmYn(y*-xE6e3 z|2%d^P`Q@{rJ1vVLLWcfESxVA%y+>3>hgT#wh``O*N>SI!W;A^A z*)x6YlQxx}Da`zZGj{hT{v>p_{kmy$5?x=wFCjh+>*Vx=3XxN-q-_PQV+|HmcCA^5 zrDGC@MvyYu<)})txsq4&NIr?xAT-wWnWK`!XvC_6e3Eh*4To2|MoT_&4o_Ks{1C0L zl2G0{Ge>#2y6Lyl!Hr`O?%1~bX5Ol$an|S+y~|BZ!-wwKCjC9h2^|8{Q_T|~4QsVw z-K=3P9=VXb8Nd|3wT0#^x^pX>dnR#4Q~`-H~f~c{+?FpS9x^(r{I7jPrJ7b53IJdI}5ztskq=axmQxo-ZN=-KQjHQ;NG^evhyYWsGYaPS0g z0RCRYH(4p4$xaqb^yN|dzFD6*U`(X35*4%*kU1)x8Cy5cR$MmMMfRE-U(Ox0`i-?` zpX_6-va60EA-dD}eF4@X5b~0%g86r=gwGG+XQd?;*@B)da{RT-h5(`I!n=B-DKnbb zwqspE^51??;jT?&9*nd22(9~UESsv}@R)X~MUlG8V3!HQ>4Z08@XmOgRN5Zr?P;n2L<;fx_~B89dOWiyXDFO?P%KEo#ry4s>& zn~}Nb7gIb`5`Bt^H65DPy~W{p)T!^rjiEDbJqyMllia4nc;W6EC|8u$qO|qK7Sb zCDtwv$sOtoeJ1&CSx;`w=f58U|5uj}Mo8rw7~#PeVnJE7?ZeuTQHOq2#Sh%dK-7VRCQ{#c{}X$g;+pWdDxxG~utFFAPKo>eMf4BEnP~pF4w% zF#Mdk8n^9}p8HJYm0M;lF%D?6M5@zgg7B?RgJLzshpqX1s+X(`5!_K^id`&U7*nUe zYTI_io1}MPSYR>?bvvyduWv}(F(DYZnwgPc<`#F+gzsAuDAq`x5j|XGR$YQAr}lyx&vEU_(x?g``%9BVA2V+CWbp)m;W+ zOiflcCb(1T_J5hB8=ohc-^78jiYvUqlkpg%kAruyJ}xHedcx-RRNSNam|vLVF|RVr zx_l5R%azj`cNnhbmA*|ClzfKu%5+P$d7YTwIEf@cl94UHgc05hK%U$I|5Tg(L^c30 zAi&zEfXbaD(-a<0u{favP1oo1aX+^o0b;xe_%}*WK9m5^(#B?w&ijG0~m=O8~^eBt62A2x<}!RMXGtpvEIX zsjOE;L^eV;8hOwcVDvWG?3HSeO-bpiEz`DC4X8((C7kr+fy_HEV_^M8YJBKROnO_; zUl>m4njx`w8L5#^J;ut95Smaq)ucsSFrpcC=!F-?!^49cH7X|O#3AvoQ_+20VOSqW zvj^b9XA>RBSWl_(KpKpkF7cs{ephyA5PIr5dp9At9`@-0j05IqxneXY zJHT8>IV&KWR3`aJxCT0s!rcKYa=Y8&$BcE8>bGL@+Qnw5>dYLg+fM1hF(5*noIaKB zlyS+Y0|+1C9{|AUiytunV1h*@0YSbz-^qp};9sZ3(P zBNJL4*!#J)ur!!*Qf8@xZVe3Dj=puZ$AW2JR?$4 z`KX*3>dqrKV|XBa_=F!EpJ^(8EWw?NyqJ@6#jvrXIu8Hc8<+q3oezd%;(1c zG&}3mTI0@q6b3{GyZSnpu3SNt??S2?a+0%mcaj|Qw*v{gj5ubS&Q=g?$%*Z~)*ELi78n-8(5PtvDVHH@;on{;F2P$pPg-_Ht zu1PLI3AboBihUmNxA}&A-X02N$<2Iu6a5QBqh@RSp_~722p@&fcr4o5eW2jT0w+F) zxLN$}f$Fm)1h+Kt3{x4EXCR&>**gH7nu0QT0in`TI+fKa7~~M8KwF7FQC5Md5_^Ly$`v@=U69qQEF^ z*xK_tZs@E>gy^|>wD$OZTIoB_Pw{$R4DjEQd|N&(znJL#mKN@WbCJ0+c{=%eUG~fm z0*zj_rOp4?`x8n+gx*51-^x^|RQAxtKf6rRd;v^7irxX^vTS97`gteJPwuNZuv=9l znnh|xZazU4dwHlIOkF+nQmUpyn8BC&x|e>&?9U1)+T>#=C(5JMj1j_aY`WFmxh)rd z{e0@|h$#MsI7ex{+N@%}t4qd}!1^C@1>;6IdvfQ+V4z91Y7NI!0YEZ|G!=-(d3LZg`M<^G@&lxMuZ&aN$7aHE+6^ ztOMaAE1bkM#f{(_L9UldHfF4^>e2@q3_ZK3th3;ylKLW{1|Ixc06{QrE%aljn=>fR z=qzrYqbgmNG^MG%{p76YmMmjg{)sd_^^CtW%5ED?bOqUGOfV*H2a6_`R^V-;gv!G= zb4ua(9@BZoso2ls&{)3ba898j-R}9!6UtpcBfzipSON;r%sjEd4dr2z00~xZX~MZc z4Nud49N+o*Y#}pmBMV=VPs_)=4`-M;OglzrRPFXpoJt(4@f+M=`O2+HI`=oC-4yNZ 
zDHd;1SGX?~9yg@&?*h9>?!x`)%}Dxj+d_9a%vcgNZG|Zd?eU7PFusnh0RC@jN8^kD?g#4?9B`!d4~yhS7tQNmad z1&_>%XFMH%#8(L~eFxqi6Z~iA94r#I`j?GOVb57vcCS$%mb&Y9TAH})ed?32bFjMC z`K)I!(!c~RW(w#7akePg{3?c?o} zqCn@I0CDs`M}%D+sw%El!#&$y%rkFzsMW5;|2f2v={Gi1=N2Ky41}0V{49sLj_AHj zNV&g07BQAG#tsZa`SYxSoT0M6vfO2qMbapOh|eW{EwbTo0=gyLp+9(G?m#?L(bNhY z8F9kZ3|_-saw`Zsy+Y1B4M=L&VhDH^pE<=c+zW?sew8mZ1S#rf<5H`EP5i_?6LkCX z_d#k`=Wz!XfJ|(F{r=cfm{p};D_w_;xeC@rEO}Ay84w#`S*nj+Ox?)a?fG_QG*7jN ztv2<|ZPrIH-K2F}Xd=$N+5DR+*TyjsumB_IZTO4e~LuTzQSzh=rtc zt?IFB&U$-}d&8Kp>)<~d*xTx7S*rKB(#F-432IJ2j$kJUy)e+HNCTILX8 z{^RkQF5P)vPSOzdGx}36$*RP$?*L>d@5s`=)b2kBCd=4}C+m0crdSqx^7`hhM^S3Z z${U;oKpH(|c5shp2f{Ume56B+6bJCWQ`f}A5@fEn?lF-SW|GH#SW#kr%-Yt?W*>-7 z>?$3_Q2B(nDpT}v`5uj9DwbGBV}SHcIaxOn0jb3Z1eYYZsyE_-^%p*Mhp!5ajX+zM zIHI3U@!cwHdtbMAkFtv3?)mEX7tAEwBU-}+K&o;$vUmRj!y=E3PKzxB$cUi6Ct4kr}d*p)W`LD2e6sAAl1u3rpH}U=y9X`DjCJUGd1#QO{jlP@R=( zu{5ATyocx#^mRPZ?qN5I2!OJY`bgY!r6VJlVq}Hm@h|~LoyXHMb0Z_e*HSLGD9rA& zzq;po6dA|!0{NT${H1j==`(AcJ(LHD-&U^G8J6=b^-}93_aJjt1-oI z@r0Xvevtpq4!Vy6NbMyh(T(;$5qbT1g*#stLkzqqoz{Gx&J)i5^Wyl!2lG)x% zS@1>q)b9aeB^&3>4i=RXmxazzf-;xJc@puq+$5jH%B$=$wz#7hdrgJKG{ts_T3YEt zXS|m)4pgB{j<6bEjnhjKPabEB7nIE8lN)WxIf|KSscr?jT4Ho2*_BjsjpUgU1vozC z!m@CV>zLz2Y=UW-${Rdhy>39U=x2J@sbMLjl7CTyyFhB=9)m0z)=)m+8DJ0(zKkrc z{xX59Dk3vn9zOmR2jLi&lsU*Z>uYsQ?AQ!VRr*kNw4>k12=8@_J?iU)f~!7it|iZ| z%o|b-_|9SKo%y6^EbOT7lCm;uUknmu4+YpH3FLx+2w*X$RwYandfh4%*ok-^Pl8#b z3$?h z#~EY#r10vp|5?k{d|>gu%dnG&e|~Q%XV!v;R!Z(#uC`e)8*I+65#$a;^6eZFb2i1R z@PdS3cBm#&6SF66Kb!5xVrgF{aFxEi&q!@QgO5^{sRnNP6x6qeOQeOHJk~t?ZMO~8 zBcM%s&cx};@W8xY`Kra|8rJ2UG;zFw**=O)A$RDEP_QW!S>%|!YP-t=x_KG>&oZOP z{Z0{U7%d6CwHvg@Ng2fNV>EmjnMvZIE6cH=2b6w7^}1owJ4@?kmR`R?EC?vTsj4W${3CSGpkm$MXhLTO{$-TzP3@ z&}%aVxd!#fg9Alv%ryNrKU~eJ@_cO&9FvSKio`l+E7``Gew?x@&zm@2r;b0ri!>qvVVk^}}^`+m&pO%|1(>QAF0XO0QM3AsJ1s9#$K$X4uS^c35!GKcRmG zM~E`^fjmGBiSB_V;RqEH_db@Ulg5G9lH5nU;ISV@THE*+1-@!?7!9-YrBi;TB{cZi{ayk{-Tu+Jty; zz?r0E5Xms3?I~J`Z%P7I6af*I>REpJGJ>QPBD)t!ORIU0a`6uMIioKGiQa&D1T{TR zlRKQV{Pup&H=A2DlP zGEZ7KkOe|s838uBmOygAAypXgi3RMdXmZk^FX4_x3&2~&1(;j_w95bRwt1HLtIPD> z$HWF+49Pi@&Y?@Z+gC<6NtBhSeax{eG>t~+7)c8i3K7VFfG=wj=Zz9yna5hlNE8T z8fJ@+Kh+^3#ku4IKKEGj2mXOIfP*K*J;FzcZ$evvMQV(sa0*yOp10xVYjuKq?7qu9 zv**{H%)6g3y#(srg5curkaSfpj!7Rd?*Vpq__(tDUl=<`8h~O${0GIx>wNPI;RXTI z$vCeaL%6J8KyY-S$u6P)VBWGW*77{KueD{mG&PE}a)oU7h6e8Oci@TfXaK`^EaVKF z=D3eH2)K63Rpp~bAFk?h(r&H)!t$BVCaxvPhiTE$*VCS;-itQub~Su`1)NX*$jIvq z^rw*J_q+E%Vfqq-sO=mwqgcOkq{@FudlYD)I1Q>%vx#2|d0x39O8NB=M=Uo7wZ?f2 zlCD7LbW9esp=RUBgoml2u!Z!Y%A|_h9+N1EoqsCwAc-2towJm%hWiGLd5O@iclGko zsL&xCm;%9sND45y1tX4cdV_dWWv!1auOtwLWF++TD>aOVYZUrlt}nM_SgP4nu9$Zj zCb4@>MO8t#fl5X)kJ`+;?r!CioHiwnI%HJp7p$YYVr!bG)SN;_0Xq2)PMg#gvPW%P z{g{w!;{dc!aU4>Q^rMcazggAD!Eqim6pjAsSJ?KgV}F~PLh#YfszLHI`?<-dS~^)C zYH;Z1)ddn+79629Z!n{Iny>1lF*n10LRsuJ38RA8O8%NTAbT5y(OQ*bXb=db8DCbJ ztM6a&UHsoUy>86xx0EseTv80oOg?)2wR5Ng`=|o&u(FrW3=yNxD?gx%avD$Fi^U?c(0Z2*fL=kYC%DkQqY7y`f4 zY5{u0HQ&DggKWyb^Bc|+HLQRz9v{A*Ssm%xEVc6C2DN)x>D0X$1py@RdG z-c8Dcr`Uh1qL&aRQe4y(o!;MAWyK3(fjnR(sA;2*i3H%|`X8)aM+2 z+Ssv$f5EV>%II_kddBTK*1I&D;rInfV=&V>fGc}os8ZT+e>}}z_q(n%*uC}%ftbZ+ z5Micw!8{V;NIIK2D<ePQWaixJf)8VKazHB7hZM%kgW_T2U zO72&W1CGI{qRMjBx_4L+-Zy=lh~dtqZE}XOo-1A!6#@~tt5(tGD2bEmxGYgZlIj)m zqfg;ACxSE3b9|H}x?(AfbLlE3Kq#(1snRqn&n*p9*6D-koi7-eaW8R4mrMXIiUJN)rPYD%eRZa7(jeWl) zlI}SE^#h_m(06%$(~eZ_piIgB@<{k(W+cND`9k9#Cv0F$1Dvqe!+RXU`$-a9F2reC zXC9-&K~J)?dMY63)k;JMKLk}BV(MlzOg1-Wmjbrp6P}oBvYDE|+vIEd-0h|XRXUjU zz*{W7mfX4pV|(#op5o!A(#bU;CJQ zd;t|5VpiPpr#sk&Ltp-rP}%fDaqI$-c*wYF8$wW-{6lg00&&uVSh9{BTm`MBwM=dL 
zFNaL)h&QEuZ~l{xu}?ZAJWlzsGXu34!VM4s5RKaW+K1s{R)k0oQYnal_hmsOSLBpr zz47{$a8FJdqd&4`_1}s>DyZ_pesy;9*m|0Bc6?>scFKt=MxXO*rZ?-5Zcyp>;UU?c zW-v5nN#Af6E;F9e`i{g}6e+#=ZkbMJ&}APepkKB^AB?ZEKDXIhb&2^hxhxZUsvo*k zXkt9E%lvw_A~-&p)k1lqM%Q2^I6k7rthuDjroFctkB!D`8gki#^yd;nr(8XaRO$J^%NEjmCRw;6~J?v%$?=(+)Z57>7|0+ z)!v8w=ksuiauGlE>z?Vpd0rYnlJ|gRm_37CHGn8M-7E+W*ez4`fod z`(K%m;sxg6r=-zVh;p!5{J#DBL8r;~xA7bKw}{`@h}z2s$~2drw#shM|9-dwj6BP> zANFq#nq2+o0fb11JBgZxlkVSI-e0Hg!)7IsWN-h^Cl9-UHYh6oSEk?B7@*)_T}uzTG6cYPO|V?N zJ;^BWX&X_%?lB}I7MeWSV*i~U)dc@m$c=B*Rk-qa*MEgUDYy%bb2o2;q|E(qXZ%0? zZSa$yP}sQ1jb5O@!n3DI2|7`;^SRn1ThVoZH{)=w97AwrxUFkh0l7*J;ONaG(qB%a zFi04P3QK@9GDUk5`QWceJiv^NUTRe(-gdWfAgE+qud>}MR?%Z)mJg#dtMmxe%!!u$ z+?jz*bx$nGK*c$@Ts0;3RMQojm8=spCh+5mhwJZ&t#$T^wI1L(ogN5yGZG#wmesQl zLlhkVmk&t+{2M#&{NL_O2LH3g-B&EJlJ}mx^9V}zKvH0#t6nV0KHwJ0E&{~I)Pi%J zcA_$2sbpOG2{q>V7r}N0%yjC(I+hePF@%-AYwD#@QI z2M|@HvvcV-^;$45%d)gND~EI0eP-?_6yg6^cGxvkE|I#JR5K)wzKMbvAA0X>n!y{L zr)vF-`K(A}*9X|iHgA&&y@u7lsTJc@LM&GxE}nTRb7j;{`1z5c>P4RW0!v$_cc4X5 z=$_CGo=`=Z!3p(OaQt5Rq+)M8`1dhv$&e02dI28(d(Z}%?_h1Hv0DB)r0d&N3{~}m z^rP)T@oayNoE=?DWwW75v)O--U@&u_RO!Dd@y@~cg#K+w|Mk{uh|D-@T(C)(&**Un?lmwMk z@T!5D9GYqW)aw31Y8e1Dn*NsImoPAvGovOGWiJ07mc&(sCx7aGe~|;ywKOgh`V+cU)-@`NbSfxT>%un1`G)pyU^fMU?9auW7<~a-lC=f5d8+Ek z7#6Q462f&;&=OT>9PQt`p%@AZMN?8UT z{$>nu54h~^P5sD)t`E)>&7s;ps9T>3F`k~qiCd~l$K4rP77 z=}=Od0Ou3uDdWwSh^hZM#T>VJIB2(ed~rlQm4hE%kh}1469TuFy=rE9RS#=asiz^B zj$}Siz|#s$yJQ3oP7*NGZOSXkD@0?(HGD#ZT%Z3a(X?AH-Cs_{trg$aGoN%7pNU19 zt=tsr`OrMRJ(B&woZuUDB2r0|Xi@2xm79nrs@2{svPkV4%QX$!qO{1|yb@zro|R=EHY(K~FZ@Ol#R_)Plk=8e)dh#- z1&vL=dGraatf;~h-@GUI7I>hG#;Z^}VrDldjykedSjCcoXCdnsmuU3hjLpd0@tB@V2YNOd06krQnQQ`|> z1!)SAD^kMk-26_Z$$C|;)Xe2aA{e8Z#nrcGwp2%1SfvBV4QTqr83rw76t<%oV9hDm z;+TUKy)aqc?qVl&_c^419u2q%6T+v4@+in`m6Xm9hReUwhu1MRWK*Fb7i+pTW@T=flHhKJSPa6varls}B ziB_h4j6Gu-nnJUrclg<0r7`8^16P@g$xDuEzw>~cY>O{HzL7(=Q{^m^y2f4~nW*Yn zjnKuRrDG+DBLCn3I<4%#TYIDkW1lwUEP5f9FDZ_%Yt8|*pU1_; z14?o|#;MFc{S_@(rGze6FNyt;Q#%Km*fUxcl{wjp*7ZF~6Dp8l1Xu4nZWYJewPnie zkb+?x0Y6gXp_I(%+|kGrTKh54&mj*+X6ui^SHko|$z_Rv(R2hn;wK2pguA|>NyF$7 zbjkZVA_Z11a_&mkfnMN2SLNbihet&|V0fQkoLiGu;Z+-WC;Y^I42e7a_K7Kgm;Jaj z0Q?i}g(K!nmkg*2%S5J2?W~4+@juA-srS%KBX(;zPMnkdgc^@`GYNe8^wLDmd;`9W z+Z+H8TbDBgV?nV4+rXW^^JqwTrgXd0O?z@7@)Jr|!9MwRe#)^yMz528c!-rtGOgzV zPUe`bmsfdk3mcMHDWpqua=oo*_ED=B;MY0bU>ylM?)JT<^un#Az5|CY~=KEtA zcX~F^om*!(XI)cNYEvcEl8j@O4%KMA+-_nL1EaXtAq~z@g@H)pb=OZQvVb@?znbEw zOT@Spn~18zcHb@yS{$>cTCe2?N_rinxpU&AsQ{#MA2$ml)&)26k@-fWi~RA>_KLP{ zgXs^w=on_f4h1AzpN82cRUq7R8kMjyTwXVJ)H^JH@-;NBqSTh*4$Ck`IIgW{(}MAl z!}8)Qn6^7a`k#tXGD$In;#74O*N>L{ge`@FKP+G3EI*YOIiOzo5ZKpEu+vB@{E{)j5Blj5PJhCu`P?y6ct%X7%PCzoJxuY|zJ2$oeKa62OBCL=iJkV@C#=!;JN;#(RZgfKqk%VA!9dW~UU^SZgGTOuAQ^A|=(tgqAd zn~w(aDa%0Nr8zO3!~;W%3N#~qW(vpO)Sa7mE0g)LKcZ~Fgj~8fvV=s0ud#1his?yU zF?fB;&h5PT{7hCm{8^`Y$ks#EQtzmz3?~&I{WJ#J*Nd5{RBa9j%wIj?h(FP|!T2D9 zOI8+RHA%Zv_X^BwnMca{u&X|uRDaMc#oI=0qkM~Uw1&HNiCRG4SLd~TTyUzi22k}p zMgJC^7CgAuTi~?R94qvWF1?Z)CA%l-n2wK^1cF>bvqLe>LA|VrgDS(uw$d$LajZNI@>PSttw?E|mO;4L4&Rv?-b4s7+{>h2i$f zYG0rS0QQb6g#bc!bs?L+krn&`TK?HN4KRX*sGmJzHl<^wbA&602Ns?(v1+rhs*Rri zKw}W;gt|hh@}D3xoyNB~#QHy>01*~Z9`l)MJ+>CWaVr|mv_(DEtns^~S8ut&F4Vpa z9IzL}QzK|#sLA6{^VzBAv)b+Ewyw+Vht>#iT6UDvLIxt1AfM8_z$$%H9lnpiXl-i583CObMGX2~*En%#{52c7|0sWp0v<*3jQ% zK}Y+xr+3=56RYMAUn=M1`05TnJc8!iHnnRa!bcrKs_mw94cZciUk2GEhl{*EjC~-g zg9Mu=sUdFxg3UV$lUSBaepjhhCNY$mr?slcegj_#-}-*5>@#}q>?(Vwb{!DnH$Oq_ z?)3FyTI*%Dc%2hO@(79}1B#EFx0NBdpOwvAl|^L4Nrl)9KOPhO5}&p2JZ>XC_ePzT zsdWO$Uac)x&-Mq-*cT}EMTNeXz)7N${7Xqc)F(jNNCPvWcT)*rTM}{V7IfE6?|p(8 zJ4l$zO~F~W^eun>4#1y1RF+1njzs9wmSnW!ocki}eBur(M*%6(yTs9_hDaLk2V3=O 
z3U<~sN>2h~J#v3SA^op2LIpOHLpa@(B{J%Jz!THVC#9h_aU_xCm<4b`R5U>-3*D^V zTTkc+?B#@)`AG9X_8e3AO!At~YQs1^dYhi6q7tPMS6iCSjC0Bkg=WC{<@QwJRKnP< zdiRa=`QMYw_vIMd5acS3{F(co1Ji-kmrcX-(TF$@6`C}ahV0l-w%8}JmML1JVO{RD zZvSTb_ex<+@KU!FfmOY!`o5FA^^`f-cQPxc>(kp%y3)-k-#0IhEIbX-vZ!?B&6|;< z>ElQbe2(5Gy=pXyXvgk->eP%94kNW!ERFn9a7fqaywGS5cQxc7R|6Tr_{*fTX&hI3 zEDYsz2i)wKNmlSA{`fb(Y8{6@l4ais`)7$VFR?8qbY8JaOqAnwCsuuLb5#qUqkc26 zzmVxRNFMa3u&-l!RjosI_0>iTR>xFdS3LV{{WnUxY1Id3nV+$s`#&1&egjG;PJfL( z#_Sj@u1G=~H0jFc__sUz5UTsp>KGKkO|93DIKkBX|FHMoQB8H--Y6D8rCI1zL3)!G zAbaXiY0Y7}DUD@$$;8Hm@>TV-5-5_he@uHtT1@O(% zy_T}}A5={M7BZ@DI?lCjT|ly!9cw5#z4{_)I}1 z6D?X$=(~CSowI92+oT_{bn^mZ_>7jUZ?M<3`{l`;%vR(ZvF3OuK48|mBfqG-?bC9) zioDKcsA!Q`4^~4*pke&%@Nb4k!kQMGX5c*x6uFD9dhLvV)hq4+H2SjVL;m>OW)`LS zX#Mp9OLy0tGZzH+;5+YPSh;TzILj~jSa5bZOR&x+lRbLS#`ESbNwIwlx1p|Fy`2dL zp`4Ph;u$PL5Y?;YPq*4uV*dp*{LiQez&Pg)%>T?EJQhBbnG(GR z9#{=ABjX-*rr3+nRA=GM++xl~8sqW^zOmTX8#1CO`I@KJ2`=^`CH;DRaJH%186-;L zz*{Ihx2x|6Q&VRw!Lw-NnpgNpz$qaQn=%SCB{_KY6_}LLWj|Nf5C{R~U7XI;ihtUq zg$Ce49XGL{@tcskG z-XB01>C*&pzhal*jGcHhr@V{~YlQ70Yz*B;{KZb3vQGaj!~P2F`(6`)#9fKmE--yP z9#uQ;-Ib>Lanym&SZeLjpaY$2u47`F?C3zf56`RX3~upsKGT`9W0?(2rt>b%MHqFt znF%IXOkSmgtJnGr%6{&_H}yl@sJ#`9%7h5s>=FW?fhJYf-l<@Ly^wMwCG6>d3)8;@ zNSwZ`!|n@lzhn-Fy_dD>8~9MZz&7+`hhoKP6&5s~AC3!IPTouw7yisXbS>CxStUce z+46!igU>1E;*oJoOxlv?Yje>S6Dz_?GF+zpAV7D?Wkibe^ku25Rt9IWsN&269#JH3 zZFk_@y{8C0d&;=+`1`4J^AQT3E}i8|JtnXS638;3Nk(Dx$`xhqoK!qOMq(cP?yIu3 z#TPecSq9CjU|QXquq&|nPQ|V*F9IC?Uqo)-AQry*pxN?*S+|eWg?vemcH4bA(=m`8 zRIgw%;IyCN)wtP_^`~~_S@EvEc0I&)FJP~l)tD1phm0k~#O*y{E`7n+^cPW|{mKWADyLc#C!cTDldjoP zULFLq?~rS!68jAVwA0_yeZ*Dc?co7UAU$KZ&tF7Ap)W10EiCCHTAi}8?qnqtx=+jv z7XT7*h4Ine79-EoObe?B^z%VdgVDb&r{5;)-!pn(r$^lbYV=VkXn4lpN^Pt zG9MDXzmqJZD^Zc9jn&tuc}OlUJtByT*OHo@o{EE#<+D?v*ZBLc7; ztB`|Z$E1;VtrDOCllO@}tuC&@p`zHQpmOYEON>hrjhGuLgILi?fhyB8CkCJX2K@rP zQGJ`&))8p{^_?>dmHgdlXkl4@IzhEOoe&uO7m*?RRt`l_7Fl-bfjjMMV=4s&44pBM zl;!tI5%zKK)Zouw<>9ZtN&<@J;NP#fIN5C2NjRb&J0mVGnE5rL=Kl41`*+j?dQYG~ zEcFw*Z(z3a;n#1ge-XV!{{26zFYgP<5BSQgkw>aNiYgjuDXAHdn|95M@DuNop-UZJ zrFj1yXM&o&squr{(WBa+{cfW8wjlLq@etpV7*s{$bOtffQ&dmmuES8^JC`?lnr1`v zu-OPL*@CZUi0s{pY~eXczarhc@h-Q`-)P5Pk4ObbPZO`aq{1ljE%)2U&xxd9}tq@kG$Kmimef@Pr_*Cl#sup}Gnu$$*D7H0Zi042o~lA0oOjM&u9Ytj zm0|lhj5zokUNkL%zAtOCTX33I+Y9FvCwMn+Fy(T($#h+FAbxAZqyghP|3AV1c@F1$KULUClmlQU-dV)=9VnBIfyHkEk2>f`1| zpE9dA^l3(y7RJZ+I()WnNgOuK1>P#|um1rD?v>-Q2b@3NtJsUPVdE)vaUv5o^|kYP zsf4w!V*QMz1i^h^GZJrK^oj<;+*Pa0vf7^kDhQ3fMj&8@SINpCH1Hzju)|e{YIU+g zYs>4##96h%!^VN|N4K;ur=7uo7tt+U4Wcu{Nrj*G1)}l_cYh#wNa$xbUNK3eCUI88 zBnWT8$i=rk{On$qKCG^PFwOr~%6s0kXKbW4;GU(wz49`A>GkYKv;%?3m4-_r?zs2Z zl?lkfXbR?Vs3suQVM)IU6cF$~BH(WhX|&Q&Ob!m@=(rF&0;jmNrO%3HwxC}rQW>qb z;_tpbf+EOcn|s+Duq?c8+&E-p4(VK}hjY)>zVJZ=SZ=r2>&C+L{h7-J0I8mPhKzfF0}S<`bz5z)xNrPOVxOH>tf%H z0$1q$2g-dV_s!gq%^lFTA_Apk=DWu(gM8bfvQ&VEY24_;;gIr0Z@I z^+~uZ^V|AJ*vLpvm2*k*+bS78>TtD z%vz>n&}*0_{-sRYQ|SOgs6rmvIm213x>h0JF*V!Ft!246 zp#IEO1A5#MK#9eW4CC_!4_^*x*G|72z7@^B+yq{Z?9e;9^2u?{L)U(mydl8GVWix7 z`tvH!#gA844J5X>hx|P^#U8D(N1AXro|L70ogcw8C^ksV0JjL({U4XF%Mj{s9e1i8 zbOsfFH8LLx#C&YdfwdZx{p=#)J}B&sny>?hd1~OWAI8&s!j~o~LU!PVrXKxin~?*Q zHA_0Dhtu&*JF0i&0c$GG-Otj{jciWM+@1EIJXgzGeHkRcbx0;~jvj)sM(C>3EU+UI z2bjdu^CEiP-5d#hF!;h`Z}u}af$Tp%6}}0n3u^{&q7A+oZ=W)S+CV8P!6u2dF^uz5 z*6?40dQ9A{r5r5uKt4OPDixBFcJQqr{=!7 zoJK9nWpQ_P*^*5c+7z3CEJm^bNuNezKBvq1u4Cxd?jIvNPAIb5a#JCH8Hflw{+1q)HOpmC=>bf4)k&dsEW_K_E{d>W)h|VbY4Js@x*yXyi6WKyAa87fh znPc_Ik16dV_fDlL%$N_Zs6PTN7FPCFq@l5>#+jp@M%OZ0gPzgewN9>#CEw;Ufy1$| zI$d>8VLKmzMzoRTAC^Pq&xhN#^eM`oKVlH5fEBOu zroOw*H)}e|iPS?BPk@p$^ON7WC&m=ub|Fm(S)q_U(HYm16n%@L1FUG1WP&mrdV8&Y 
zs7+=1Ztz84srk6wCwCux^PO;ch2!lpoJ{QqI)LQVuMNX(~gM=3SL+ z(yiT8USS77NK}X!xmFv9W2?STJf2_1bj{B8h(tNGt45=DN~&k=PDxtN4vSLw(}Ibs zXonUcb)#bL9-xVKVKDQmkS^X(Q~Y30HD@sWlN>EEr^&ocY0Tv07vELG4bPMjTUEdW z$R@3BKK`29*1J51S*zWKvn0FRUJ!R>+9MPbj7D(a<`>^}9S}bz^+?bUb04#-#Tn$d zSNN)BnqtO}kDn2e?fh*O-(oysL=IFM`1D=f$Z8Y9dpNfr6@h zHc|J}0!k z+|J5rBA{IB6BIo^^srQCpAD4RGfbc|dnX{_YcIQoY2;)CDOAvwXN8z9znN`)pRi(@ zL?Kbyb}j4e>ReK|L^&dPbv@rO`I*>D>BH&yYJ|SJu}x}t!KZ4AYRo?RM^vtNZD#oo z$$msrZP3~5&5+OXv&g;+BM^faJ=0?|SgU+|tb7{wId+*hK-&i%(DtCp2#XQ&9y?~* z3O96i$J445KAjKZl}<`tb>-Mrxiv5y95eXM#Qr17F>3SkrDsB_I>%xZug#cPY|qC@ zG>cGqS&^9Z;`qvSD0>Z8<{eQ;JugG>%dh%EkIKtJShdLqv{$$4z7#ak(5SASne5nK zvJ3B?^9qYr5Hjcuh|6Ire0h^>&^+FmTkH;q8xAp^D!B8h?L#p#ctrUIlK8mB(!EBn zXQ3>1F$QzoXeCtppVpdo(R?X(#=gXM`X$ik?EyG^9XQqDLPj_ISx8mdDE2@!*}LVqxSab%4e$EV%l}7j)h4h{TfH2 zH@BR6vwC9$TEbv5vse5+S>aOHOQxF|uH@X-ijO?XxTjj_P9&12<>9eOIc3U^f@IDt#@R}n?>@oZpuK3I!CXUspf`%&YMs8(5H$+e3l3k0t&;0D?~=P_3^0d0g6 zCON4_rjM|d7Ho=xm&B(uso)w`C`Z!6Glx$zlcWtrU5rKED&gFzg#ZuOtC`f<~{d4bC@9rv?p5#wQ zB+sdzm0qi#wp@lb8j8dV2gE8F3-U!*I66NkGV)p1rxq0%>OFd0|Gh>Fm6F=XiWpBK z%X5-3{)@;sH9P~X|2pbehO*if!38Sp@#+LPZ9L2Li91KMurz&;AL1t zGkD(<9`3mvzbkqzQW7p$MMnEDsk(Kzu4LlED_(55_+VKf+IA|NmMgDLi5lXix3nH0 zHlnUGgLv;m%O_Axzz;;tusW%{7rbNRr=09$g-Adkr8&fBCb1I1I`<2K>d{Hnv09OX zt5|bTG+cQ(aX_s*mYE)icX4o_)7PRf0HvX$M;LDBk(dKm!_mc|pj|%aHx6w~ts153 zO1iQn1xGed8)^@X$_Qpd&YyRA@;;0`N>`5GkV(IfJSsy|*5RpV%31>RcFSK--7_Pw9|CRJxTQVMLdv6weplolwb29A_D@Wvin z$7cyc&-8IJvTsek+(p^G9g2ixMQWT#DtVLu=0);4lbq!Z`a!Bp zIhS(8^VQe&`m#fvaZlRp;x+euHsg1kxtD3Udam&2Jr$hIOo-Ik- zUdYBlC}`>79Ho~(zYOMMNqL;{x}$Jl+hHg3Hzwmx?cxIfCIN>HUc_RzhTBw!*}Xz| zVOISzyap_=q3@$ZU`E12d{p)_hR7zD1+Ytao9_bEPdf%~KGzokGKeKen#=JEM%Inh z^oe*2`|IiG+tBqJt<3Jm(+Qp$lxH*DrRt>y$kpeP4kdtGHPhlp2%AqZ?ovtTi8PJ4 zb{ZdaCgiOXi<6pTkvBHIzN;>@!c{#fmCDs+&@G!aHQBo8Hs0pwDuXMM^<@IcPBSf5 zrVxsFM_OQ$S^8?XUT5{5PkFZRdLzM~e4TN3!WvR%MaO72JS#d?NI2dr?5!2~f-D(Y zeKV?0k;&!cZ=w5mi86hFwENyJ0^D3PSm%?N;>dT&Q#w`-mVaMmm{82uZe=6?{AMh z4pV$BD*p5@B8M6KkJbK0xn+D%*)7&%_Ym;ORhxrkkibxQw zRdYQCrPTL59;Ybf4Sbj^Z?#R^KVQ86T>T6c|7OL=$M;79!RKD%0wQ&IWoNM}A)8uk zt~tlSC)ZTt#Ygz*D<2&3b0hG238k0G;dt1mE5 zaix1&=A^vdK_?zpELfrcgWepnCg4S4s6zja55J z2d9+I=b7MX*DU~^Hda_;qz+)uLTm7tu=+Y$4(+WH+e~vW^TnVIyE!U)w}4H77(?hsGeitzpcIAn?2fhOKpIOsp$GXiq#hG@*$9o1LGAFViHkz*WS!+#&ef&cA$>P z=6%BI7`>;8X>fsFtsRFCcNAhOrC2E(=g6Uyw{dZnX$_QOij|{ZUi(e*NF}6AT3ipv{ufc5 zJ&Ey?Wn;9HgIzTy;>2&C*aE)Ndq5E$e|Xgzpm9DS(53~I*B_Tyn;l zmd$hBeA=NusR31QRJtDF5+Ei2aF1Yld=2+icR4T)simiR*H)`!NB43>%4%kcJ)+pg z?rJW*Ea~sbAzEP1;qf_ z6RlW*XeSmE8dBb5SpR{<4qIhe6+s;-ND(6O7Gfj(CUG8P;{~5!0xr^=>rI`-APVot-4>D*{NT zK**2fFMTxCN&GXcu9^a)qj>b4a-Wkd^*E}q=^ErSfv(LmLy#7T z-?f`;bM*&}EvJf6^4$@=>?evY!3k@C@@fSVoe=QaOlTgar2EDil(!LRkF7^UDy}e5 z#K9_#B2MI31O{J>=j>-TKyjB0NkF?AQQ$sKYlZ-1gK;ssr1wCAD$9~AQ_oFw!uv_- zMx;I(-Np|U5Ch_w0;3dNABE~l3#BG~;mpjK4egP;dJGUK-Yir5UQ^x2Tem4^fPQ9j zqAo!5B>UxXvPY}IBwbqc9E(v9G&gRJJLQ&W!$x3ciSsZOdC(3tNL4S2e|R*j%DTV*l1E^g zCOlA8_bGY0(O&QP2s&Gi1C>o3QSQ~|037FcT=cGhDVZt-UZgpcn#UDDP$`zL` z$b_h_0o_@8I%?I!@FM3sZ>{v__44|)LN7-&2f6bJbDIHkCa0^@F!?uDbrxFmw0+X@ zfwq_o`bf`L+f^k?`qCxyBPW25K;qr&Ku3qA1XV?eY?4%XsTtWvs`Fu4H$2f8jWWHY z={MZ{yyv~(6S=_Z!WI9gS@*LI9(vycK(h&xbhuHcPmVl=8Uh+P-g5P5FtpOB`2wvCropd?#4tx1+qP7mTGt<;(AiT2cIZ|{S6XNEhB$Fxkh zJcFmm`Z!;h+I)NJ(NQ=KM=Nu1`H1cGsj?PLV&x3i({)GM0WUYrkV9AuxX>L5=jph+ z3Ls8#%DojKvo?vx4S|l@(p6PC_2nLFy=amz4dLg3oR@`y6G~^8rVixDgkw(JxL~j} zvvJ$!9Pou?HtC28XTGf9DL1GNj+h^f5R>U|XvJJqjzZb4A)-IT*jIIZMorV$+Db5O ztno{0+Zbgn1B@`waKU&dddl0t-abfImV&O}!vq^&NB3HRL^Dm6^(L;^q2>)rgeHg! 
z1P@P;z6@`Ixz@ZA#@FtQ zlT;h#rYRV#^+tn|PVd-Q_#0l1{(e&Y{&hsqt`2VhQ{=m^V$V?~w3=ykk@mB}swMNO zcc%>e*Vg&Dya!tXL5fwjgcm2!e771XwUQbs$%ZK~80$`MgX-dCq^*gBt(zb_rMS&k z@zeB!mG%h3`-tb@_CTPCkI`qxYJ>X)5w$6AyEU{6M;4b9daqxUe~gx z4O<86%H(YD)tUkhQaNuR0?GzwvM*Wi@LD3HM>RA9Cs4Y%pOC(jiXPss)G$L#aL)c;@#7FGuXO)WihjneQGNJc) zx7cFBl3St~Vt9!sB*&hMruRvo#Zj~@trmEF&TV=m2H1FQT_V8_U2=bHV_Nx2^0!|k z0l0+i_uj=&rii4bE+x!MKZAlrIoy^n{WqIN1z!* zq998xg$&Ccl2TI~V}}HSMsSDEdwbRop$Mq1KHFi8sL5@6t=I#E%$iMH?BYwwRC}kA z(p^t}J{N1=xx<*702^Ta_VuTjaqvm6T5&pXWOfMKK9+5E zmhrADicx1?KNk(J$dcVWcrHrc_Fku=+(*vrUX2Vjtg4F7-sZK~K>lIEyLP-jwf^3y zfr37l)1-1lghH~XfBZXR=O5Lj^T3eam}mW<)OGFK0uC$$?HbP;hR+g$>}nA+RC*}X z$rdbbheacg4kpmDcg}&aZK^JQFgq^k?+B@rH?7||?Jy>olE3yW@MizaZ|$B`q|48> zWanI#w%_JE$7w5&jOJ$Jc@jujh2~(Sordqbp;kS#11_UI{~AxV4n53lp+%^ZgIen+TpZUGZ(Y#0O+|m!%?bQ+kuLs(44d z6q$BWfsL-_C9C?zw3p)f|MwSeA8TF+dq zj7y*SdOvbD^a{ZwFdS+Y5ibM^7{B{Gfw_U@AtqwaZqIV1Z!VUnJC$#p=i$JB5*-bvdhGteUqmryx6?(Pga))7LTpZcWT%)VJ9TJAh}oXZ8PY2D zj^DU>;up>Bz(R`9q~KKIL-Uz91O%!5SSkPQI2pGsn^iy z(!Yo{yUUx>XN(zbnw{%Bk0j?ztTr(zP*vLD%+*!kNXxclciA^tn~2 z5)at0DI%pCR~Tf(;$~dXcWaq$Vr8B^WVOS^aT7wVwX8t2iwZj_M;vkLfCS0d_&+)4 zENUlP!Ff1bbR$_c6>p8}VjUkP&_(lfYhJoziD#s-d(kCx6E80f(biX_|d6i}P zy3)P8fXwF?hTSM1{zdf0eu!yzPSatZb(qTz5|JKi5{A_OjO6vFU zX*n~S%BK26Fo-&FfQ(2^Gq25Er(I5SDM5?Yi1uGi6%%*jS@QM4s4Pt`{;N8O zi_ZIiW+^~LQe9hIRV*-dho|9(q*f_-RAlV<{=8|zNw1VO!I!Q5-Bd9$MjaZl{rGO+ z`1A{o+*UntY142n#ladT7cxl@+3m#5u9*UPF*5cyFrTg|N9bo|ZM%t_ezKYISQ(1D zH`EWO=!sR1Eo~Cw<}HI@O(A5G;*T=(ZMSL#{vv`aXr|#WQ1t2E3y#8SLI(EKJ z*Pv&!H1EUbEv^}f+f$!x`bmcF!kYYwtbp47wVTROrDU*jWdHlG%l zhSNUigEcd`#_A?GWQyCHu+AuEIaoE`eI4I+YUNGf;858juAmLiW_BSss~Ry5Kfcyc zEHHaRl>Yn!tVv1;qQJbho>b6oij0(EIJAZh6=xrGfg_I_B&iQ2TC=WaAC%~n4coaE zI`{v$B{tM8*b&e$0~^KPf3on3a<6UHAM2ag2|k+%qZDA-mi+pyWQV}mp4ll^6c5_? ztUmWyeL>dnkjvK{8l+MG)!fBFi&UwS*duGA_^S+e*X2ER!cIEs92|y~PZ1o-)HRr? 
ztbq?yh_~)U4RLQL2&!5kx4&6!%DO=pJLf-x<`-Kjc~9FCY38B{h_eQ;Sj?``by_8cd7Wkt%IbHTcM%z#J-B{>^H~1G(d29z}h&&>ge= zen8;1vta_qThq@zMa4y+{phOV^nN5R9hW1(K#RXYN2DV;Mg#YW;}maV^?_ZysXy*81*;(;ZNy^ zfEq!xBXiar%kA((Uw*FP>tg%-=P)V)Q^3!61D=*5@TG|p05@~@I>W5&t;kt;C~1$v zg#suX@EQypBV{jc&JZw(AhJZ6DS7wVz0U;=(12WfEu+gStrmc1xiw( zU4|5UZuIwBe-@KsD}MkLLIWZ-_@FG9HRsyv*p`u6xuM3Fs&34Sdv{g3V%;~8!fG*_blaJxq{Wth*$zx%L{?u;r9}$zk|A-k(_@b^HW6r zP7dR_)BATFi99an{J*Qq?PW}s{$0MAfdzf@OE}CZ+#B1A7iq%r<9ON=Qk=QJ@!arL zKe6#^x4NdS^S+vxP`7#fyZJI5=Ry26Tu}^2Z%Jk4$h`4p&-@&3VkBm^StXv>SPO{( zY4YzF&Gd8r>gxMjjY?|+f1Ha%+8(fc348s`h9K1YMRMAHpKh@Y(`U(nRErpnK4jfPuy@Z!<3dJqK3?l_Jd#s_~l!Qn2SbN zo9ZoH-c3c+ToZaQWCqDL&myomGNL-A9>&p?3<-h<3csZGhfeCy9rb+z>Y`$MIRWE_%PrUK-07k+h=%jNk zz7z7Ga9HE9$c$@6@0HgX;XVhfJ3lq(IqU+?x)=Zo*#n<-6HjO{nT?sf7o?<%QYA<@ z@@4ezzL*M$o|&kswgw2w$)MMT7QBpIC|Kc$_;0|xLIDr?0=0FGt4^=t=0u~QZf3bo zz!zH8Ee@alzfP$A(%CJp&R5WPcb;E$nmY*Xg}H)*o|$^z+u*HMY$v#If2@fV8u+uaqn+rddSY+4DZ*DpmzXlt|M@jHsW z0wFAx@tsdpR66Re8FWh6>e8+_&IG*WHK7j%cNVJI(n^;}gOm()K1C;CM+U~6t$il5 z6VAiO^LF_F{pW%3ze2#eKYWdR=Vxx~;Fd)WF@c>AT1@2Md%n zb=AQN0?qQgw&|lhD=~&W-)rLgV_~DVdCN4m55bDI0MXq3z2}DlxwlYGJj=UjuSE;e zjHwSqA4f-Y8jkf3@J+8RY(VO0#J^o-VW~(vh%nF_uN@x(Al_Z##ygB*1!W;FHeym= z=mnSgde=2n6j$D>H;-hdh*n9Y>dU^-9cmx8LLqI}L^ZPmMUPC}5BLP&9ldDy#MDqb zCNyEBk3N`GYGhFPDG02}mIP6p*@8>bB-!N{ui3zMG~5$Xz;F6*s%T+A8O_p=DcL+} zDv+LYnefk1mh{yF?K07Hu}I5pn=`FCMe329tl_ z)+2gFV7IYDP@C z$w%KBt7`VdZdA(@jTn$_8?iM$4qbkFn%p@-!9hUsC!ToB!W!}PH0@6kDd%LBh_>eVDu02N8 zR!R7#Qh}xKvQNBEZfvSE2&yRi)vs{eb1yH@HC{uS7w2E|pL)+*m0$?6|9_FBV)|X7 z-5Vdh;Zh>;BkEek2s+276W@5N6lDfBWT7&%y7SB?E+Zue|He+WtIzT-<2W;Bk!G=|({rUra$L(aY1@PQ)K$HP>q)~Nm>N?gQFz1kHx^z05W z+i=;34k3Cawu|UK_BW;=roC5z`t^>YO>-Ojk-+p7RNRvEoC5PyaUa8-q-3APjSper zXH04Id(zT-;np=ufl-TPHT98V~}Jajuw z6NTeudDyKv+Jl(_8%6^K@=OiiSnOE+?0md4d)90bi+&xfIxccK??Bn1zL)KM0LmnT zeeF7Pn5H@ImRhG{6q#T(XmpRE}*LZ7iYP4v!O|^MI z|Jyy|y&F}0boXX_?{%!+(M&d)XfnzS**Pg1!bXCDR*9#w_c69MSJuAlLK8eU8{Evs zhiagz!kS>MgTe%o?jSrZD4a?h{{dg4QfyUv#u4<%H|ok-*1HfV9!GjF2o?**2M)F> ziMvu-XI!~soArACT^E39jsT9RN@>3NbFWL`qc5OO+!PmI->GtFGaYvKhT7dFGGT}2 zqQE?CR(9WQl(Sb--COAjMnT#W8Z93i=raznN0A2z%zn|s=}Sgkw=z@V;$-rjPUM;G z2UJ0b)c)~+uF-cg0|fm2MclMJ4d@8=L#oQ}^+HEic5A1fl|6MLh@`GE@*mklkf(9d zAY3uXq~v|Ky!ywGhe)MB;3zf{l9@DGyJ|gGIn*+zg+#^kT>R3k_1Z8iQZ-RK!ZR{w z^kHZf+gW0}JwN}ky;=BC3lh>Tn;4`MrVC& zjoL$}>eq-YESsuN(|$+x3Rx(u{z|hx5(r&RoUPu^iy(aePS^E3Z_xX@?5}i5;SHbP z*?wjS6P}F{{*%|}tY`NeX^8k!&uMaF2#b1}eClLn>tSAJ;%w{!4}l3whhPeV=E}AI z#+OG=6@YjA)3u+)md-8k>;H6SNjl8P`(KT9Z-YHgn*KEwR*U9;q`f7yOjP*iV_gE< z%0S1^{MA#^3l3en>BKm$~n~mfyU@e;)E=FV;6G*$Z%3aEGD$hjDgExeIZ~g@E&=A(KH7^3sk7B z$RIbA$aQhUWUPorqE7#%3e4qZ8=U6yKv2=H^yN5bAL?~=^ObuqVNNdEm@;u|!V$IR zF3Oa5%Z?u(wTO0&z{xyZa2wVD6OZNyUDKb_9H9H5J=zG z9ATV5@^mt>XPPE=-iL5{U88c+uluG|a?KDu_}ED5h8%o=JAZe>lNJ&K~1bk{+D=%4`x*iC+9L<0EqU-&VWum&wUgyj*ME>mx2;2;{5PxQb>imOzK}0x zksFV8{r9X1YL8y8<rUS4M3HHqE9T1%qrR z=QPjKWvX!A>h25NlJ6^}ftNDVFiVlh61dZzqL`{~d9 zv$yqD3hCuKv8D~iCac1@ZMJcG$Is*rOk)gO=_y+2n;Yx8qcugExp@D00;=%e+M8lr z{JY>EXYHkdgCDWr&{CV{0$G8|E*9HR|J;B>s{O27d#&3I@BOdzOF!~*B`{=wOt>-0 zc-*>2w)@a4h6YxRDLI$CpRuJsM(b#>fq!lVlj~Cd-02=8t$r8VusOg6X7|e&|H+0t zM&4P;`BUraG4inqw{c|?Hx1TvN=~M~S1E%0~qY~ss*YB%8~ zRPs;#`?90e#Q#b{wTw2=;(j$dCC3KX`Da(0B)_1wc})A`xk$Y=g+zV7_v9il^^&Of zfY%ZnSJ38fTB8mbHq^>0v6abB*oKFFQrB1D@+MhhS6t#pFQ@d&Rlp{u+`Wx*aC4!7 z0T!3?4=bAb{8j(!_m?;P)+V57sG=SZ7BP*kcP9{CkNEl5@1yR@JA9Q&wu0Tg9Am)3 zy_k8KqN(e2414Bk+9mHyvnad_@Y1K5szA#T$Z4 za`BPP;3zdnW!cl(l>ME7xi`$_+wmHKy;9j=7ByF2_KjjGhg!mpGvu)|R|*M$E8A7v zeQ=K1IH40X_Y+%szNRy_wJX21Gmsg46h?D?sLsP_M+C}7+a~aG zJa9iy0{QT}v9t9HDe0{fBubryU&*UBK};eGv&xrYQgY8|M}gQt8{jqG&p~C>;$37} 
zbx`5)qwHZl*}&>P0m)wf&jmRhF{h2E4TR8YJ$}7Vwrb+G{LiHmu!A`y0QdjLYyIrs z%m1uMGVf%;7S@*f&m}YH9g;FA_oEE<13i1Ri$ONt$~%VyAAzP!JU%IANzO+jEmWOu zbl#@eCIIjgu2R_}i}8Qj{T%@9Ef{Y8P9}VpJ^K6(Bf6_va32@RaX{#i4sNI~C+Yik zq9=0|SF+}QP}P^rF6}o<*_usFgy*al3&~w5N zG(#rh|7+#5VAolM{e_?&XBD^xHN*X3E!XsrO7(sA$<8&kWi_9a0Rfg4$6s3}$5vCr z5a~abcG1THwlHfY;$lfaQFb5Wr6w^;06gWAG;L!ySn(!AI=S7Ar`E{$`6~;dcR;+C z3`^6VVJN?>^1E4U6vOQx-cJq5&M;0Oyp(*E?pgLhIQnUKe-&Bbz}1}Ik}k&G8S=+P z%QO_3twf>&BmlW&0nt^1cnwtz*quWsuv`-}9!@PPJAFhUOI@vSI}zwoTR)x``~emJ zI@`1Jfh9oH8mX%2vClo09`I?@3aeG)Am4hOjrRbYc?Uvb#Z@g#0jxsF@O!`mN7kCG& z7$_r&PrA?((p~tO?_bq9-}uaCoI=J&5~Tr5fypvbn17b!SD^prAEhPjrfIwwey7nXM~m7(!z+K#1BZnL$?o~|SV zyN=@I+N=xoo223lVUEO#E43|=maf=rvNa3x=f-5?Vb)=k*AltY0FpqD`mtcdW>Aw5 z*{vKO3O>x}K-U{j?^~r@7h&GP;U%Z<&_NLdwLXt2y}43V15`1(b{iQyCwd@|PCqtb zlskzyIOl`Exr2(oNm-S3m~FfY1CFCyQCj#$#5_#d*4&9jEST<+rTtzQb%O@wlRGXAx{H1uo)`KhK|Hqi$k#lq8b zs?2N=rT2fa_a0DDE!(0fiXe!9hy;luIW!0~Ip>^HlXIrYu@w;zkeoqsY;u!>L`4A! zk{W1oP;!*0h=RXCH(Sry`=0y$|M!jY-q?&9i>g^wbIw(3b*-vdGho|P<%ufCu56M3 zXv&j@C64`i`;oRS`X-ne>DtI}z(3@CeQEc>8Ov;GgtJID=XA$OJ#DnrPN5EIA2jA7 zu2|~@Y=Q58)SKk#s8`wp#6ME7n7Hb zb1avg<<4x|Xaq+tP$Pk`ex8n+sL(=0bgCn6-8oCi8*A6I9)}Q8y4H4#Ys!Tpz9N~V z`3Rgfw&g@N{*y6e@0^26Hh(4Xq78moJSR?J`yvGK8(+vm!kld@rS5MGlbSh&|GAY@ z>FY(TyVzclA`94j1;@8rXBqJKc;{Jo?S3j9yxoa8(K|5}{kiEJtBNuR=hk17PjdajA~b$$z<@Arh1T4$bo-%C#nSL3t(KwTRR;>pt_?ZCY3!b;WED<1W$qFOiOk~u!~ zk*;2;lfm8iI*yjK8c=WJhWnMLf{BlKT9*{&(x8qmgx}>Pt;squT9a^PQ<{y=LUN5` zRTy(L)^*;?M@@?QakhPcTUmYPfC5atDI>Rl5qmSr<0%1wlmaQL+3Qze5!SiE*#B}p zRvjfA*t@M5+VteZ$i=4|pq)`8}9WY4;e}U@}m4Q z$jBG`)5HJ3^McDloJMi(n2<%P(&lw1l^+K$$bDT+U=L$|ow|pwYL?o)gPq~Yc7Q~k ze-F8(LZTh8b!nzy&YN-~WwO)B+a-5vQDN@b_v+)d52Kz;k3SN!zUey3WHHd6%XTUGXdFVLGM`?*Oe6 zDP%zdiBpm;^#^5J%I-Mz>s9^El?}CwcUNip*B<6YG{UwS<2n|O=K%I0Dj0uzviEHF zt>UZ`cS3iQRMMxt;`6DwWYL9Lnf2&TN}7yUUhn`U14=QhfFs~%`_YTTd_hmi+@l?sR=R}aAfe5LGxZB}fO z-aNW;`4YR(Ypf1a3UZ_3B?VE?!`jhgy+uO$mT*5T0%_%Z_pjt$GR5rEmbnQBpp?!MF_ZA=Z;`@3J;=#i87`-fUj^ zaivtGy!LeF*Hv3!{8y)VASf-UKJ%ga(jPCA3br>La*ONY+su7BH#()3my$-u9v@E~%1{gRO% z8TF>G&Uw_Cvzs=v>jypenudqr89JJdNI{)bI~VMs=nWO#8EfccSkDRq-+WHE;TvaeNAFi=kokn#DYi$#;?~LzPl3r< z;s+0cuaN(3xTOA1Wle9wg-B0zk**=c^J_NLQ9j}=aLY~$+?jLMch}DW>o;B|6^N)* zhCEg^3hNP|)ax=av@Cj11l2h%|C(wY+n~ReDbpQUe*+*ZdD4GxgtRl_o$dBP?C4hA zn<=}^<2u#gy3Y%3Rby>#y<+5YHCJ}qBTwfymIMYGknxGrf2WQ6FXcMNbIj}>mmDKA zRIBJzL-nwkUsJ(B(#%UHzYKc@nSV9ghw8B^naV@)clw;pVn!CrT2BcdR<-!fETe*1` zuX1A}5zxa$uP5{_7MWrR2wW*nmTTCEf?o7Rq7jLiHIz#a-q`pBrP315t+iju$gEVi zWBFyD3(^VoSbWB~F#zY5k?eq$bRDfQfRU14ytP)-)=_~*jJbm+@$B8onktS0NXn*F zw4{|8ANdn14YGZ>X7CKhYZsh6yO``lB@Cgvy$;9 znpDthS-cQf3A?r3*J0qvR=&}F4&8y^ zCmt;COXEIg7pL&mdp-eglKrx`N-W5>Z?+5`os5)l6wo@4%}!F|I)Hi-oIn%$eoKhT zDob_fNL!(=%Y8zw&I8b6>8?^@21yB!kzIZKrSC%SK8_eXJE;MfEx9=V(@E77k42 zf!0-TgL$r_I2@oim+Bg6WS~>l0`u5CVuR;e*OZFg{VSU_GI9*gDdU;lY;m-zoX!j6 z<1FWx22?5Pa=pJ*w}hI5IrV6g2zE#}W-ZvmeI2Cj)T{q+_GLhM$zH!ve-~ibWc@_q z?`4xhq3L+=nmehoiV^nf+Y1+pTMO{=F4*Jvb}Ba1xR*K>+wAz}ndfsq^+6&|*U@9C zjD)EJ{|r_mV`H0w1_{XN6}|`}Ju*?$u(OVVfj=cIYxsYqt^{C`D9%CQC5^M;GiiDo zdlY@B=%0$T@Y@u;zH=ss+@-jfY`@h@{MIq@Q8a-Agk;2v0=v`_dV06a2j%j{gCZ=>2 zTg$E!_wOigf0aUNDU^qISzpywrwTyyxkIk@Hda|iw3=+{!mq@sEA;lMiRE&0e-&Nh z(X?8lQ(`+TD^6XS*hkC2tnKfi-D`Ya6Q}~!kWrnk=C%sQ)hdb0C5;ottM-!*>9sXf z(7W;iJhZH$1dI7H{*B-!>!h%edd!T|{q0$j+Om{e;leYz%?>TufSv-Pt_6vYk(lFC z|O%&^8%-|y-u zzc9^@Z|W$|!QKzwT3Ci?(Q=8hhp4y*JT7@|g^WW(FAY_hAtQ*de=;VtA7jymmKl^l z3gC$s;~nY)!*jD622XMIq< zYRi+ey1d7BADXE%`ck41r2SZjuTCAC=W)@-PZX3wtmoU(OBRuxX+DauR@A1fLif%w z738XYlkZ+mhSZJS)}K@Jvw3{vkQimM=~iaYQXscsTc@i$6*`Mp1-+~p5y~rHKd#vJ 
zpkp}e%i6_AS&dyH<>S9POi=g@qJ@y7)*rp`<920z)?IMRej5P=-Ze2lLw|Wh-#rv; zDdb?C6Gz?|vQg{-{P<5R!zP$VK#!|Lg06usV6jA4<090*P^ekBe~v73hL7n(#yeTcE0Kmxb*Y6g?7(ck`k0m^U9yAk zD5{%v{FtSIDZ}O-pNg`q;1SZVGRbo;|g z2;GoGv#43)xfGL=FcY*z5J7OJ6XVF9=hz7w7Q_hKB;jEUzp0AW`piu^v8mkB%g z=em}4X?BOZM7>{QdFEP@LW7yqD$X>f<_sEqRG5w=`>pe!KEjIrhzz1FcuOHuw}sD0 zTEm`L#1$9`3fXP1!T-)T{QY5~_a@^Sd#YU~A9X4GrLGPZyjg+dE9*VaM|?#10sQwCd|`ppUKIjQ30Q~rHDisAe2Oqf8sA25`KP_=e(8&}e zg_G4G+sL*uO)4WSGm5E)k@_9K&hxF2g7ykT7E7Zzq<;1dZPLSl{dqRudok&wYPGkn zOO+hH;?Cu1x+~#2nB}T&Z(CzQ z3~G+9bxPwzA8P}-F?7ocCF0E8=FXJ90#5?=BT6~b4+<9eeJu?OgQ}usZYzKJ@Un`t zCTwnZQEIX)xpnso*&BNk_o*5)F(0Dtg)VGIS79X$0;$vkC!DJ5nuBE6LLB@ZG{Mq+ zcSl!{r^bJ^w-tF$hA!YV`Vvc+_GV!M%$Xnb_WbL1t_d8X+y?cTB1yllZq};7X?BR8 zUpAmr$mTM-iW@Mn%=k|6E4|SYt2o7Xlm@!E?8OTOyG5mq4}a<&d}e=nZ{5P#94Y@R zSZ18kz3CYSiZ0wtP?C^5!jmR3r?DCv+2)r-<)n$3+Wd z{%7NNzw|>wiyJRm2nI7dR+*Z4QKdGGI^VpxF`C*XLu{{&?t~;~Vs+?q6H&=-Vs+%7 zhCp}HAx7x0sow4~f+JDai@rVxdQ!m#qp;2?l=>)RMl_A@4(fhhLCGmQ@=atv;|?At zPBOXWADw^TJCk5fyukHGk&DjtZt*W|cw*j({Zd3`DXCJEezNm}4fS~x(pmB>DuA%d z9wr)m#a`XwMR6*94G+Ggj)dWmlbtR}K=!JxX`HBsU@c3E4i@R1Hq~x9n`%U}R5(*V zr(|$?HAV%NV|Y&2;O*kVr|1&E`}UEeD5lL4f z-~JD(jKBzmA1nmNKD(qlB_n>pZ5h2S<*yr7jFe@zLVI#Z*Apktg6Tx4o6J~07e@i( zyn;Gnc0dNs9~PG@m!+Y;XPTs34rPlQSCYFKe2{BZ>Ps%q?@j8-jdlHRq*f>x&Q?>8 zmQsZjcs=86kseVxUi90Igle3OO*_eND;bx$CiQ{roBZBeF}zAO&DISn?W-=$@R*Xo zsSc#JF-On7bE(E|P4MSQ`|W-eLWnk&qsLM4w?$eGX$k6+&RG&d$+F`5?o_jsd8=WV z#gv{-XgU)qXCE-Qu1Q${v)WQ{?9S(s)EAAKdy-VRYcBO8?clKoHKH~k zZGs?EjTqoWHjQmlWf}90*SSlFNYDj*9a*d_V-i+DToknpYKzOlp>|bvwWTO8)wfuT z%QzyTAqpUf#+Dsf;(4b#U=mKdP5mBiLy*=qXWey8lQZVtQp~WdY(L3%GO^MNd=x73 zNVG`YSv@Cx=MVE z%P@ZJ9m%Fv5f#k!)j2CVEA88dmHWV{y*fc$^f{eP{W=8xdhTdTDG#iz_&EcWmzRB1 z2Hv@^3}meNXM_IkIB2Hp@05R@>9P0pXs5ekb#exD?T59}K}!ZY3~Bt!8lVOPt1917 zi0gb(eN?rnphy}avD4*>lz5%|YBv4vG3eQTL1bB@?u%Vz?Y)RY` zzdP%N-sac!fVE2O>h4(^FjPKOkKT_Vs6V&k|NeRY`uy(}`ObWuW|*<4D=FVmpaNA7 z;>sL@OO@loovaJK-ZY+AuTeYACOA16)T%EXv@P;F?rmMF3O+KF8Vn$;wWnbl`$BQW zeT~iDBL|33(1bkhEj-%G|Ev2=uHn7YbzE5s$krFF_Xl51zaHSAdUY3jtQO0La0rmC z=F?sT`&l=IR!h>0yqwoCd|T+Kl^@Ibu;21ITBe$SI?fYd4NU3(gb1K`G* z_VwByjp@I<3Q0fj#bUw*qQK98__3KMClaCUS)kZS?ST`m?(o3ST&5L#b?VwjUM@PH zSLd7{3EiON<+>z(?Ocra2b&bmW^ z*g!OVmC`e1LryM$Qz zd$Z!$JubWR(C9Zc?A>9+6__#0sz%wBJl zUK%B}Wc>~%H0s6$K=C*;ajr`7H=%;>0eE^{T4JGxWU;WQ26 zi**Uj9zJS9R6xc)>a}iO0F8BE=N%5+9}W+H)FBqy(3ei@(`oe)Z;XQQY<}Y9m^3;mG~19*>2O;`{P;)>@sOf>hA(mOEGS$PQJpHs7B##T7N$xflwkGRvZM$+1ihmj<9v;?oU z`JghlP-)Ao`~+rRTUa;6f+%0*tUgV$)osF1WTz~f->7L6*AO^+#eXY`4r77~eV4NN zc<Wf zXIdmE)Jt7&8rrsj54C+6wV=GlY%lGyG=-q-^gszGew6E1J@9nhcFOGOkqyP+q+nj9 zu?{Q0y~rCM-KDIx1iD_1=b*>MiU71>!i}O^Z{?NaQxt47FwrRhkxM|DV zj^3qiwWkxv*Y30qwOl3l`NT7pX6Kuw7o-0{@S1_=GGo&=0C`68(-7s=X|IdNR8zv$ z&B~)6rYRQ&Ck%}^qY%PB%wU41lY(DlcINZJ>PRgHWcf*%wA~mra>%Znx-Z8ibf_{9#q|!~8|5$FmLU66v{YYaY5K$=A8>8=T8f)C@F|9Bmwy8Y{j& zFv(=RPC_v8p2bPk?hg~0XsQYRR^~sM?3oF%+fZB*_|-fnBxd{4p5~$-{XTj4ujVZ{ z5>+@s{?mV$;UFz?5?Vss-G8}F>Tb_J)fE3`$rdn|K(+|;xNoo-%cCFWL5||=KQLm| zKt-#a>>E?gJC}f?$TE&}vDK}=bXZs_;dPip^BrZX;5!N--i9)&R%qC8c6rVC>wmGm z-1^Ya!aMv!(O5bzpYt6@rZgYoFm8UW*lr!gSBewX|6D|EkZO)9F;R>tnp7N2dYPW^C*FZZY3G(OAW4#H^Gsu zN`%gxa|`26W<&j0*IP|A8h0EN8{}Ln?@s_q)4uVOw)#{pjTRp3pUUGF)0HU0l*D2y z+4FL8I}UjZ|IGb zRI*5fAQ#+JOp~R-n>y(wF`rmfdz;#G44lgHux||x zmVBxts6&G*E*9lbgfhv#noF=_GBAT8w*d~iP2O*Wgy^3k4G0)7nwS1JEAqe8VMKZlTSAR+eJ5ARL1;v)b$x_C zaY)X&YUkQULynk?oNRBgg?T>Da{rU-MTI_=cYwo_q4D#`;M{lORF^(76qo`AgzbYq z**CGAh4DWJzc>rAzn)Kl_pdtt9kpxP(gf9Q1MQ7j%OojZF?fI{p_Lbg;GQNVHO1_=B!;a8* zI_w6WZUoGh`TdoVtQ%@o2|nFhNG}Y_hr9gg`rwQ!xIFmAqpK4zFtPI{n%kW>O9rJt zw2NN0d8Ff<1`~!2hNTK-)_d!Ej2(q=cVx%e2AEKelm8)ud*O`SdV}Fg3EWB 
z1ujq7*~s5TZl-Ywd)qTjsD5-xDV-SlMZq+omL2wt6N+;gaLr1*S=#TN}Rb}aBm^f3Eig%ySq61&EM^KG33=U(}4ZcCpDw^{{GSj z_ebts^u774-j@;Hw{#5aMg6cE35qcx5A^Z(NT>d+yeDr5(r39n#aA@!!78HKx#Ubd zJ8?IcOK;x)&c+BmtO3AKSqgSJj0>NAQAOt>zUA$Y6OP(`+ivyC2^(Y-+G`WxL_uc! zpo(E-i8Iucfl(0PLG8GajCQ8aPsspOG``B*mgWqQT^+*a#KzpgC%2ihWsWjO4)fpU zi3$aV6LQK(wlsvOdDdd3DoVL?(5DP`c!iMTR&~`5{}|8t8aBBuSRi?^edZo*U0wEM4r3uwQz{RM#hL&}G$ae0a87tYJRDpdPo zrkY4|ZXj=U24=+-0G#?K#%5z8re?simR4TFSXu#vLZ9Rqtg}PXc99DP0&hMA8#&YB ze^ZHz246Js{*!bwdfhu!pY~MmpBfsfoF(I@LbcMU=hWG1*xfB=tCVBn%H2yM z8?aE!?1mRNP{&?O+j=^qe|qctj8lPQ_hCEcsCwc^?WqYf_VJ_shZnO?Pn#BK&V7iT zCyagK%|hd%Wd7+tKc|~VD2ct+Y@tTfrs5Of&Pl=RjGh!ick^Z(_ND6ZfeeSD#j*pa z&D6hv>twT{f=b|J z^R?*u{A|qzBSu;r;V^J1zx*4g~iPCrK6UOuspSvzEvpimahUo{Zg*ic`ppd|a^41l<7CuyMw&MI18T(xHd?_*7B0~r ztD^(Kc$Ox#F>m*N@jq|$`xx0$@hB#a;zOJ-bF~#fFH&_Nbi{Fc{`fx*fBcg^;$Je( z1Lgt8E`t$BfkwjMaIp02NuLY(FP^%$)Vy&P zZ@Z@Z&SvOAf0W3JyN_=tfzF$o{^5<#uSgS8OKt<9XK}?%0!oRax!e_?4CQ={kRswI zu>bYC6l@8Ck3L8Sy4G&;xEXFA&LNNEp7O`M-yX+aGnrNY7VRlZN?EHGzTTVsaHy%aC$&7vt!6&dwf9Aq$?MC7AAq&d}c-O&L*~oa8;l10JF)ev^ODe^see)|*P_ zBT`~bx0QEan%e&ZQ%_0XyU<$Wgk-Em&+Ks}1F9@^8-9ix+P){KOTe;lW2S!*d7|(uMtF=S^tuZ9)fnFEWL^2;jM5KDRcmg$q)AI1m|MnRgFJ*0ca&#BtSs* z@$T}rARvQNm&}PNJiPCVLW%eIY1>Kmvl~CNFB;$kwerpS!i#{VSO)KjPB3p68ESXY zS5!Q72^^VY)&n|f{v4EaPTDMp>itmR$M2KMR+%Nb~;Sv{FRluy))B*I1<7=-cgOc8o{U-2P{DYB=?fGdW z3{hz&#-hsm(u4m=86G!+wRdnpt<1#3ThWjH7r^%~z+v1EbfTzgjZRBw8I>ID3x3nq zdma)lky>#ka%FaI0vb62zhmjUf-L5K)E$FDn=ShlZAJgwi19SiSAfqon0>)Tm+z-^ z0{3MkX$X8Xu}PyJsSs+z@`>%L!96)zOEf!G3Sy>pG;I~%i5*kSgG9PfQR~``sUB`}_J#Oa9IE2iep=ihVk?U{usf7kPy*@a4gp zXH9tKmn+V@Id523q51bxHcmI`dq26!%(hz7D2e7LCFwrxFER~7hs$I0cwlfJK^6?t2j*ASjZ|Wd68I$rQqUUYx((}m%p;7Vae6@#&yQQiF)d#hJ4o& z;?qZi&cNs4GnxkogQO-!?k6_w%3^m@{xR&UV372I^>?Q_-#)5v0&F&-(rmv6jMlaeVs1za&VeXs9L{Qi=PT5jIBz>y*Bnfo<{h%){f@Nm=yyO|6sF2u043 zWluN@&BzI*b-m@^Lz-?P4*D^upuIO|&2D`)M(*dA*R{O9Q|8n7`&}$W#(=;dAE%BEsm0zDQygPRsBq2O}Qc?xE%v@ei zkN!D(lB&20to?A23$M-RkxY@&D=J93eNKJ$N^<>MQ0K~D0Pm*9Lz_QS!}tGd`2hbd zi=mZi4M2y26A?zPKBni+(yQC`?>{=fY67VO3vkt~d?bB6_MJ6Eu74QZjoGydrr{_=-+E2lk4+;!-gGq2Y=H}M3lxL zC+>_-Qt;Mw#rlu6I}k_c%D01uFip`vcGS(?Z$8KWpb!&*ePt9KyOu;;{Vt+Avd-w9 zr0xn(Spatv7UWGPW;~hJwxtpWJwNCf{v7wu2H8|t#!|<2c)l>-* z@G6ysKCO-iD0SxuL+H<>_l_eJFEa;wVbZC)HyrsKVvxv>+`I~Qo=m7^vc0XAxeilo z6tk`{*YUhO?9J!o5@$RsSBhq}hjydquB0!zhk^qY?z6VIRPxjSa~n4 z9zziqs1@<5|9|*4SgY?se?6*tR5MQfyf65E`~1t<>-O~@9-SMT-k&}nzIpqr^P#XHx0?9nDkZ|lH=fj^Tm_j$y(Ld<9IojZA(Ky{imN25$D&7v5waR%Z|zFpeyh)Olj=>qxVB zLforO%1AsKM|$9|rsT|H45C94cRaw1C!(ak{9g)`-(%T*xXUlr(qQaJ>)M?4v`%I( z^}UEA?m@wV(dVhJ88rX!V-FN(Ueccx?#-+Z3fvx3VTGT5iT5F>EX z>MOJ4p;ecAJobO^_7rSYEzusjyTe0CcXxMpN+S(=5JgJ5Q>42=LOP|p8$m+4Q6wb< ze4FsTd*2UuALsj+GqKj1*>h&E9`n-Dr)v=a2!QmbEbp)3oeV}d9tdc)vATo(FsJyE z_3&%t*0?Sd=`bCrG`2kUAo}jsI_%uQfGJNT^Qy{zDaezV_vC?G5I#s*9ddHm`#hYJ=IVzwIZD7 z>`V;O$6Sn>We5PlgZii1F-#53-ur)Z3e%Bv)C}7yfg(|Yib*@Cwz5BIfB_wjPKCb4 z9!q+AG;%=jcc=Y+^7d%+r@$o|1s6k3I+Qx!Q6>Y4x|e3rf}&t>ylZ;6q>DAdFU3Vt zhT)GZ?j3xva8R4~FSx+ii5U%n#)21b7y!MBASKCF8nXPcv83Sa`ixo)OkdN^=uvLF z>->AB=Y}R7_Bf8SQmr(>2xWTil4hok@13U#9~nRI38w8_Z`zW3A!W%3kyc z{O_*P`b0^Ucd8f3M{8Tyz4WuYr@}0#kjZ`GWjTqYaXs$LPfZozmRtby#~%pPLRO zJ{z7_WBjUQ;Y!5T_@PQItIC$RL;7X-gbk$mdIMq5Q2_8wCVBBRYWOiLkeq^;5M0a- z7Fs(Mc0qH}mH}MhIf6$|NrP^uG4FfDt*hL506`I96jT?PUhiZJ6)6}Xchv?5j>J+7 zq*0S=0oJ@Hb_n~!8v7XPKNA7S1_n~w^H7=S!D(0VhK@YcERZ$Ui8}clX>d6QOVEqs zY!Nu@Sh&ai)-o~wHg5=VPGmZr7aDPAPhxcScn$w-7n1BA#%BB$vXbceq~IGhZP_P= zL>slKfZnnXNn3B4X};bJTT47&))G?Ml06`GD7CY32A1(Ju7=Pj$Xw>mx96`W(s|xG zdY=!*e;y(lxvoyVwB|h?eYqJ#2<7|07T+nA5}Wro&gXWSULXsIIX0_*#2CicYB>{% 
zb2RBR{mJmLR46+G4W}R7p$qb&os{3bwOJ4Ly)bB1ur9-JOn&ao;Tbf}402besOoJ2 z%K{m4?ay}LE|NUU$)q24uBmIHnY3>w($}R(M$C`Jd(35{Imr%0fS@8I{b!rd-+p$r z>~d{Zc?jKaBxSP~3D>`O+Bz$1ca=CNKa{Y3VOSb z)lD9d%~4DxrmOA~uZ?2Z>`_p@594LvE8KU_zErPHL;z0 z<;h&~mQ^vYkFj>K=IDp#YGPuuJscP={HLlM4|=Q8=yx!^Td%8Ew1afU1VI^J^l;PR zhsz2F$MWnM{HOFDuI4Ni-S4wzi>|4fUsD>+LtnEs$9CM5OAbwm(w^TrkyY0NmdHP# zlBnN-KIUsp;PR6g|}lTngk`Z?KPx|(b^hfmWJ zO;G$bRP_wB@XbZZibhXXE-L-E|A0bw-iTu9PkRX|?-29@I;%-0WIAqLP{!rUu#uJ1 zocR?QhDueu8O`JFOV+zbC;J1ea>wCPd}r;;d}LItlaG+UiXf6c2{#D-Vci-{=_Agh zkUXl+HJ`Mmh7An>I;bJPEJot*eg|l)bTTLD ze631e^|Qgh{-xd<{Z^{^=qZ@QN-4c)I!kPcO=^ey($^H7xF@SG?rD!4sjOeJC(-3M zY#s7iOT1@N#Kr5O*tzkFU{ZEHU7-Qn_TyZ%#@?>L-z*Yt5%%$}539bZ|I7T1Pvx6n z0h5sM;;%LMXUEPUFL@WAyjolxGs~aXG8Hzo#G5;oC+pv8jvtsq;@zC%QMtAtr=t8r zh|<*2%i(&lZ61u!mTwaq2UqL_L3%4AjQsT(m~ELTNru|Vse-!Id_=O**?^o89B39k zi5Hw0EjD^t9((M(4xG7nyy2m(8ll0bga$48@?~_$Re6AD(z_?@@-0j)lrKsfe!VK1 z%TyOiojM!#pAvHu0rE6C7uj8-D*isE7OzA2C@lf*^BW4PY$#&T6m(TItzfb}R*w_c zA=D661EJp>#RZ4y50+!$FX;woe;H#d-op^32+&EO(tC^DaT)f>amv|(g>%}~2S=yC zUCBSH%zLdF0`?C0-Y9~**H*2x;DrxAU{*s8n_K>x-bc2PVN@pBWaWnY~f|i$ULZ_eN?RN$t%kfDv;>2WfK<4<;CScNe?VOn!Rv$)bRE zEzWIJz_jp?ZQI_0hC3g=Be zIZOZkVpz&Sxwq`rfsy^L{X|ud;gCGAH5B*bshD4qx@7bTlei74pI*TN3k|F|DIq#B z@VXJ+4`kKuM8ca?F*QG^5V4pgtl#JtQPqR5BuKB1>CcV(?v4eBo?#A97#-~>U38VG zr6C>xeBVRpkF?g?Olgn=bg5Xw!-hXHs29wjiRVG_L5Z*b99tD+Q4K;}LL#ipJw#9) z1Cv>?4Gk@E$J?h)3R-z^P6x$BL9ck9tqqip@>3#>LWdm0IM=U)yl0N}Zg8a1d^v$g z-HT!{iiwH}CE&e2@B_pD-+B(~7c)@$e{BctE0QHzZ0}Yf^Wp=akjpU1jmcLR_(9cNMCm;ieF^Zo4|FVJtNhzSnOO%{GAah~=t|*+? z-$jmYffGnTdiUq3leQqVd{;VyG#NFk#aMoQ4jY|F$jT0Gy6!0nO=X0?QXrR)=;+M~ z*$+QByfUQQ1o(Bft>HHmcspQ%7&#_GPs$7d zY=}~xAHyLSpy>=I9|rvDLjoyblx6p%oA)2k?K)i%oYEiAz40x*Ne>F+;fT43de8*= zhy4MasJ+S@U@<5)?+qKaxuJPJx@gaRI9nggoJPD6-~{{f$4mFno%vi6NJj2)Mu6tP z-*evYR09r1!Z}Ys;q}m$1m-82>9bRNn!-kwX1Pba`7ck_-Qjy!toPtyUvPx-dXyQhNM(^77^v*zZEML{4FM@GhY6E569XUD^U2_q-}o-H%_fV% z^so4&Mlvaw5;=!{J||smq3n4E0#=3wca%zi>7`br$i9r#&o35&_sSr)w31`j^A>8!;+-+s*uE@l&)MGJ zz1_3a*Wh=jCOd$fd@LGFK)JqlJB7u*6MZ|1nIlpgQCkp!tXOSeX{m2Q6XTBIgwO+b zRgRoWqSJI14=;Ww1VJZjw#{;9odd6BHTYg9TT&g<~_Xg50RREqqFe?WS3H{}?c-9-Djq z0f}P#$8=rsTxBJ_L(uOn>;cjuP6I9E|Mlw=*vT)+4-(Bjdkupdc7|wN+X6SuU~1iq zxhAV0esZ#7rQfPu`XciC@`O?BMJoCY?SsaVwpU5&xMbp`x!w3zp=%4yy`Ht~Abbb92<(vg0Bv%t{s56wMbv_XlpC@LS2k4k$EP8g)7eHe< zFXjSGla}QHEMXlW>q(@LknI`JxFY91ufw-W0h~VA??##WKx+s`Z^Tsx7oOqOpb3=o z$O6z@?J}|ehJ&7L%Q{y!>A&t$cB`VHk945>-Ed10rwn7$k7|mY!4D*OUtXVtYRZKZ zQW1yL!%e&!mOs{sa7y~I9%Fk>n7sVa%2*s(p12Vt%-;!_1mR+Do&+t2>VEAy?kU9I zHbuO6U+_dgr;5+7LqS|sNtMi9)pFqImzlk{1T%J-DU!0Q@rM9ZxTjecW_JRT;6(gb z%B^Qc*Og_V#$LNdhktHq_Z3~umJ&_D1>|RD5O3@I0Bgs2%ETqnNzRyr?>r1L6!*l%l&digQvpY2_|Q6Ye9%fB0N9cMMmdi-7shwm=LG`P9nqsoV8n3E z_cK7FMFsAm#E6Wt9!AL2!^{Ar(0J{P7nlx=nqmqgb2LoQ=&vAmz7iwMh<^F92~WJs!;j?R ze?XI#baMr4R0Yp|V*i*zw?rqk$1o>nJ-Ydbw^fHPxg1eRjZ*v-BB--X2hqy9y%LY`Lesggns0O{ zcKsc%U6!wSSZ#Zf)UNxRpB{ro2R)1=PEFy#rIr_lr<%u-tgNmxgrns%uhuTP%Esew ztxHx?Ee$shF8D_0akiBd$Rf;;keo!PW@4IyYnIfyf30%ofA`%ho>gO zelH3!*u<*Io`8y#LE#uCov@0enndqHHNO|Aaf>AD1h&z-ObsRP3bM2k7L-MUdyDzP zpl@8S>i&R42;71tF;|Exa`}k*LWFI(EA!wxv*<9;@9lIL5>B3Xhp}vOB7TO*opek5 z#zcMz|L!Ojj5vHJH;y<%98COlGB9pCg1<4y%@pdpM2MA+pGAD_KC}$B!oE9`94&fa z{bsucIuk1)zmMI%xMwrrT-Fj&CZakmJG~>%_SrulvTj$>3L&oKFm~G;<;MSfB)Vdu z%9v^NsXK0zg2XD<7CN~ZOw>>evkd*&!;5S6DxI|`R8hIYsX+!_F}ZQZs(Xe!r=};i zgQ7?!5ZCoQ$W~3tHYztKD#0?#RT#J1$qH6j3L$)XN%%k59}4ht&up(GC}J@S&!8_> z5d)i}_!T@UbG?!~Qk}2DX|MEOnY{(N~ zn#J3Kzn|=da`OFMlw3Xm_kR{eEDNksP8WglA#<9Jmy}lWk(dXP!0jM(=CrnZUM_^& zgqWCm;Qz35?~-WCwbRx`VwUsQ7_Ts}d@>G14!@%^_b^+#>jA1OSQqzqm;Y$BzB4V8 
zpe1P>7}iHWEni*TXM3GYoo&O9Az`)oH48$PSbeV|&0$8tAnNM=>8=o!Kat_rrV75> zFC?{gqq;bSnePZjH_*SRs{7(Fc9oVzn6h(nN1?p0SsjS;(Q-wnH=leuFhp&DuP%!c zED}W7k6W42yLy~A=~pV{>+VXU{7lP&y zHpemyj&C(ID<_Gsn}I&WGEOxjg?{_oev@--fluJ6p_YPdDZA-&4f^$$utP*skR`_A zY{ZRsvV?HenX-YINT9coAP#86L#}xo>yb&u&Zz#GXOLH_76Z;t#0rv35|7AI-uNT?@gK1wQs3C^Dlb zd_^!5$L>A+kxT-=KDsFFYXRjugh!1I=xBObFoU5N>DiARa6w|-#D9IW`XV3Yt1bL5 zfmw8D1|R8&J?my5T5BVk97E!c{G7z78(ZJCEQFXAt>j1K?M!Oks8CM4ax)FGEbD>R zK@9}*@M-OQ)x}OCUVBvBtQ$CCG^(seJy;X(WQX=B%xWak*|KR77l$7(}3EdZ^ z`l}~H-!54%&NR1EJPk<;1@c*&=OaEbZH_n}p*U%y4gM$te&OCxml1?{5#jmcmCiEwLvz2kf~AzaOBaZjU#MsUHXrOgH(xh2fq5+ z8ztVZbQ@-#eT)x1$M?H{j!;?_hH@Lz=XN7|*|5AC&iBt1M;tKK*i5fHTJ35Uvp(fU z^on}oE2$8XeaP`79oDyI)(lJ|bTdhbvfNEfzci}w zBs45I!ZYAP^0C8Y3~3NDu~5ZODxK1LYkmeEr=EUTjUI2%MQV{bBbk1w9)J6!+Hfh? z-FF7XNsL*|4o*C(K*_3}nbVT@qxY2>vP;}DUTgjp(~Lk)K0DWZt^~!400frC6y-hM zk4w3jmRGiM!01TeTg~JQ2f;e0;h+|aQ}1BV1b_h5an-!{vTH*KB$n$}{CN~oDg1+S zR1-EG@BQ>p*e-UEoq!g(YtqOb2)V(D7iuq?u-PG`UhWX9SS`svOH?ca3_U|i4P7AS{c zJ=I&M&l^`h>nIYCCWxO}l_0k-A2Ikt`XGB)_Zu$P3w3uHH2T%=PL}EhNwRRSIW(i# zK%lIn@9y9iKMK3V!aBK*kEGe@AkAqvQwZ~@#ttqimjI(197S}74)`VD-dMrsi*in{ zULK2VcWt$&R{)&gk=7NRlUJtKe|8+&Aaw9y21)~T&>X4R_24%4A}T?P3X~j0xE;Qt zVWls=kbo^g5MrxZH&&a1VkiYP@_g7>nuXK=5^$d+(P@*NX*m4n*2-u;2IibCEdmAs zd`(v0BgAy30iV!FuHR3Qp3K0qqm!>~V+5%qI=BM)g$bz~Vf>Q=j2J%Te_E(CVZ-wI z2ru3_ez67B-__u}8ji9Uga@9ljTZN?@kehL0Z ztOilF=gh+1D67Nb32*;^Fsu3xMU&^=sEp(gE-uIZRY#O=O1#b}C|3pFl(Ls`lC$f@ z!!r{63gl{`Viwna%DReq3~&=9-VowVFZS;dKoy6v^%*toS7%6 z8-LUy+|-7tW<5@z+8GJ5W!GWaX#+zw@by&a+k$E^B~`~QT*`XoFK>=$f{oDVVVBRB zk{!F5&0sP}|M4+9wL#cxQHUp*Ua1{M*oLV8R%dHw!0sPVG@k0%MKDS7f0@i$dmsdE zeWL-^f=39c*H7P6F-!~Jf|R)vmS(F`4dOtHQVw;XCe~vQBGv9h%K|AYYWm+ao9kF4 z)gs`srsRE+d?ihHm?s)fz}MJ&bYEkZiD=CwW;qqa5?0R zWe~Sk-IJuyRAKLXr(% z4tBi6&LZqTupJDQfbyMxP06XzWwPXde{-7 z!F)(>o7J8X5a>&aFc4w;aruWPqm6||zDI?LoAIl&e`eBL7H$e3Do!lky-K%q6wlz( z*(mk1vYuxRrr4F168$4`V+}m}6pp=z!9uC{2MUTxabinF9R}j64#of$YPokHm^G2$ zdRZZ{cT*t=R#!*h!%h-)D`&emQp?4S{Nqr#kdy_kunKm@_K=SlnEFz5iF(!edcD;= z?^|De7mP%A!5Bdv$x^8shl&ug^;;a?-}b&Vy0bt2)rq~y2p$WhiX!NuN0;@>??JCl zV?Nf`?^Lz@BFGb({j>jI-pyH<)^bMw-4l7WJwVo?(4{&wD6c8mxfp0w+FJ#IaETE7 z_^{NDD8kv%l?T;S2(lC3>?$c0=E@<1Wgj+~dr;^;>;-%W&8~#+DB!Q2+l6WFk%Pn{ zB)!2ONy`>VLbF!Ik1Rst_2ka~KqTR}?%_+oS6<*&DX_y7vaQ0K%aN$r;SH=XcT z#vAu7&TS&AlV7&Uw;#~|ty}r3Xk|SJ&;z#^wJ}mhW-$YlLDD48^gRffk6|maj@JEu z{%eTSBI9B&@hqs3^%ALka+2@ZXETmFl7m);~z0sHGyJln>RmYul$Lp#u1z{<7iu3AWgTjN@8G*pp+ts ztgvdAYJkhXrA#Dt+64awMg@a`b#10-qIm+|jTXK_%pK@Ilg>IqGm~lnjLk2|2qAgL zT}o&jq%21kRT8UTn+;gSb3OgJPkll(D@cV=PS{G()m&ML??zCg4A_zcXz_qDYCTev zUR9DqiFsgB?cWhIKk)~I0mMmJNYO{-f3W5$q^Y)-q*4CRrK)AMY{87G>e5m$6OhGM z=fvzxDrZy+4G&)R-yLIWq|fjFmK=+Rzg|Om?|SMs;htZc%ej)8%K?(Mut6 zIlA&b!l0LY=K!h=bNTfhuIXIyB5LY$G=cnr4g2pE*MDo=9DW*fynYm~_OTKe_&LVD zCAQTSm}$Byukr>gVW~{I)uwj7MDn6ug8=`Usl8r*Ikh@WSmxQYzBGTmEs zwhIr(9m%l{v7505p*zwXrgoyB@UBe)BksxuHg=K9@m4d>6yIBIgMUPxhzDTTe;_=j z&J>QOvm|BjW7-}@g`LH}?;cVH(}V587_P&2nZ!B*5P<{Gf`Lz}6Z5H3X`V(PI8w>O zTHqxju`gk~pGD`_c2i4ETiyWB6|6cbwzVLK--38T?p0!B28%C57LgfhEz}V}{Sqkw zsj((mOza~!|CebzP+348qFNIZzbV1~BMROC28}|z`+%EfoAYq$5!F%M+_hyJco3JG z|Hp-&D~FaL@kq{e-$_f?3!BA`wD{5>Yc;D!$k^`>1X5xN(wL6iN#Twl!Q_O{tJe4p zM0LHk3VMl2u3%O5(oaTaUS!VkD4Q%Kd%WYW{#=2KP>EGxN_) z-Pt0t!0wNyRuv>5!ewt0(_H@I9XKKnv36t8gYi4@WlZK;cOt_V0=-z3v!}f+8%q zW~%+7gzy2>Y=V~~#uN5Kcl9ZfgL{#h8w4S@H>{@oh{@m<1Bk6l`el((l+jj|&0?4H5UF7po6`CjzFmnMU3r~do!tHv#c^-GoW zLZ6b(E<#NuTJ8=$s*p}^JNR|!)%ONYCFqd`h?Rg{^KdgZ>0tA_{63!DnVXHm(;+rnV`#q8qyK#M1b9Zjj%GjtOnwT=c1I-)Flc zFze+ znk6kE@)XX12)~T$C>~4Nr2hO#`8y#a_n+O|BgX%co>om2ZsC8vAWRBAO<+LMhxPz@ 
z@4UWC1@=qe=lamnn?p6mmR?7`GuZ3w<>o%zp{4>;+$sfZ|-xMJmh zIxS;+xmfwBcFFWLntmu%ow)Z^2$SD>h2yC1u~gI3kHsnqq>UBZax7e1#0`osS)(6S zu2tz0?y_`U>X?Y6%YE{@4q&E=lV0Sd)IzGRXEEM_Af--`S7+;5* zCq%#532tD}<}zD2rIq?W@G5NtNU%b1-$7o z_QF;?5@E0!w9m;;;QbHXT90Xl&(Ceyo&TL3-iHK~kT!#HKJuCTsM0>du;CQpX+OEK zTmUaInOb{+;ZAn87%>~PuYGj%>o4m8>vT@6xb9@h8Ai6sY^=4HWO2X2@7I%CQPzoe zHG5E7bf|whaT~B`dCZ$}^JO$PfBB{s#;g1IoMEd(ocy=jO=vPImzi%ajWM{olr&Q2 z-wi)o@$%ig3KfWrX^)q$;4(2LKqFf-Dmiwj^C5tl+5KdNqXfoYwxEik*QoS2!00Bg zx`e(cPMaBU?jg=Lm73e%z>JC%6kP+~WNw zFGQ1e(y()AHtwt%X?B}XAC9URO7aypwU9?wvDGn(N%svL2}zE030NMeQuFw|D-${# zvF~{A-I=$YnkbM#wf@}|qeNj_>btuHC8ai6pmD^AF_g$7&m@``YV3@YDQAH?JGJ`S z`6`s=Ns%`+=11Fx=60}Qodv~+AL1^ZF)x0)};n2 zYNM*iO5VCorc-n{3$?M2wrqc6X6JV(K2q(JgF=#m7PEbU$iyEl+ zliTNl>jeIlX{GCFhnYG%zdcT1La2$PiW=Cl<>Ag0}m%zYSFKHyPgelI4 zNU5dTTLPEq<0NAtNpjRVi$l4%?_U&(HCEq5We;^S3R=L!9kR!!xJu_ebFW!w)th+= zG2r};WLe>gw-bH1#f^*KoX!7YRy?mkqoGuMC$3xrZWAIlT!~Thv!|Qzr}6kMn@y0*UlD{)7hutnViqP z!?M1DI4LDZNNka83EZyHbgw@d51A)4dqz$}SE~C4{`_Y#dk#v?q?RNB-X>nsL`F4@ z;Q-?r$LD91iw3>E2k&_qGs$pI6=>{mrPw}d#A}a8Or}X1BKn_sJA<*b8#$at9m^LN zOd9LjoE^PZ7sQWm%7Dep>Fqi;wpdH?*+SUV1D@m|^6U znByv2oRJ}Ee#;iRNL;VE`Z?R>jHyvbjqN^vV7pmxslJ-eaBq!ML)2>wpW>e+45Jtf zrdc4EaGyTd8G4TfkdP9?XwPv+{7;Hl^*-1EQu(?zCob5o{04V`fK>Z)p4kI{w5o}T z-~PRn754j>a}HRMSJ@s=dPjwwWRNU)gkgYg&v6Ht)@nzut@#^ZSa1}>%FQVAwLj|R zpm-zn=syhrJQ((*k5e;GzDb)-jAG4d`*ELzeQ!{bEN%KlmhIBK90eeAgsz}}0R{%G zub0Xmuo8ELhyts!P05OIz$xUHCq%o>jsw%sxN2o(Jo0rB+9^1!*VR@M%m!22O($$M ze>246w{|z;ftZImF#HO{E4oEps}J}}15T&aU}DhnIz&yEpLavsO7R`QV^!u$@k z*#`rN%J1x%e(A@(w+RhWm)^+vSbXNG!{n$Fi#-P29(C5~B<&U|lV%D^5KwuO)@a(r z6$iV=`X2UqB}!q+>g4s7dx^l;BAdU5F-}fHs~OtWjNk^NH{1h@&Kk)j?6Y zr9*2$h(u5juhvcgUFA5Ud(Gce5R(bRMEK<6%Q6@yHj^flTgNz!I3d$oIodF87h=xV z-Bn@xbkp)%0fj0=C8g=D#GkN!Wl$`QG1vO~>| zyVf+7?RBX6==8$na92aT-`?k5NY@l7&Cq<^pTMZEM0doA6B4R*8`L<1k(@sm9l-lE zF>sx(>ZeIOsD?n3_`7_k9O%>Y83N`A4)PqniJsBr$B;-?V&7w=gWsd$L2PeMx zL12EK`$RENiGZD(N=Qf5oQSBqV;32bpd6g<6E5R;CpX)e7B_lPDVLwXS_?p6~|dnmIqTtBDqbO?Q*TP_ZI;hMQ*8;?+E!%Oj!zpbj-b8 z!1R1lb|^gx%4MEJ;JV%aC!Ggih!3=vp1I9oHT?Q<^|&p>x_TP7{$%xsWR69BayWY7 z%TSXIfngLq1YEt-KK-~LrDsVb73 za%Gng6jB({txwoB@K((1Q+;hNwe2t!AlMrPbAhwuu<2)42dv+nefcfQiMwAs&8BA5~?&u%r< zAN0x5Pcyrig#x(Tax13>nb1ujE=*I&;oS9S$HgLWOma88?Vs#4r=!X5Zfe2`GD=<` zn5Bwi54Qw?Y08YGEKk!K5mt}h412@k9eA1i$1N&7T(2$@xTHcTdkGoyd1x$GAe`;u zsL9~4O!2mWegf&>%IX{J5geT20i37|ocNK%sv_g;RV+bEBa^jZb0&IPZplEvS^K5` z_5_S1mFom#p@ck>h=&GEv7feVWq2B}VICJ~b8_aIj8xSS$zr^5IF_sExTe z4u{0=2JE%n6HJqo;FLN$dsytYdl9M&^(@+LM~3~r_|fDU`qfpWHQ|?8CYRhS-7}Vb zM%Qj0qpf&*C=(GQ4(5!%{O3_lJF+ZSy+U!`MPYQGQynj!gRc9eoakzJf70vYzf!+0 zw3^_L`vby3ieus6gP36Ohb-5B7xZi;go?-#jj;SFfgv#N)Yqvsm!Pe zPk-f~Vj_qAqq`ZN5$}-OcL^S}5t__-r#cjhXEfP{aE=p~f{p+I(T4ceLOAQvXj$fM zBqlM$))bsin2%4$O@bLPLEG>I9yGjxiErCzf;jX2@<_HA^$D*%VOs)5)0oFDyAi5W3gD z6E@Ed40l{&_qPZ4#2bzcz`4KN{*R1p?LLE??2X_u{Rae>`kx<(K)#o#b5eVXNa#_s zO<56eyMu~i*@txPR(P$e_Hi%y!F_KeL7otYT9c3)rR&O!q{~wo)6$VIUd~^F^Vp~B zR_O~d;@t-)OHR?7y_n+lpn7tM1KD69D`bYRtsKxwNljj9qE?#eA(*%HCwOEWbq+HS z6rpK!uBizGFAo5}mJ?qQTRLUW+@T&$IHGS73o zNKNSe$MW3!S?<34q`T_-8q`_C*JdOf4*fI*LRfihy8VcYiukwzxVDIofS<^9;o76c z6;N2w#j=AJD{-NGB}4A;Ra4>5cNcsB3e*DH{e}+xL3I^UG(&1?xK)>RN|Kn(Z06f) zn9%6B_nj%^BCzzSsMTu4f$KjV`h8zw$KWdJLp{NQPzu|vYDzp}qeF+zR(TA(X|#ua zbqn7d5AT*aN&VdCwV{{j>3s^Yt3h@j(kE8!NjpYRwbndRb2!hHV~E>_3aX#W=wpey zVYq})HS0MQ4ha|Zr&1a)wAe=BFx_z2L(}C>;3+Lefa(ON^O>_${p|Tx+at`7;kcx8 z*(R7bSScbLLaWG4zTkA}r-ZN+_-wvtFyqYZShFJ~R5nJWd&h?vdH#%QQedQ(%1Dz& z`wwV>ybI6YGiSbq;kA)_8|pnF>=k^V%EfvHSnr)A39BUKI8wGE&^`+Z4#aiToyt1 zdwi0jpO$GeSFpXV1GXI$>}cv??$8ov3L4hotP)<Nmg~@!#l70 
z!x_`te@>^a5m}4zledl{N-Q#MnYY8n{=yNzk5|-v1z+gKlH6+D#1j0_-uCaAoA9Ej zZ+jUfTyk>gl^{btIK#_+e1*eG*Y@l<4Gr$Q^@!<((KU(~ct3(vfV-%Nv_*ppM&p>Z ziRRz=jXK`Vup55H7BS^;xbGiUu5@Etc2{ieWOrD= z=yz^b=wm(Jz#%Cxdmm5Hh zVF?Z~u>2vp1$Wb)^OQJXkhRrUBebxWh0!LAj{+&_oLoP2N9((uvs|&!hm-E`NoNzz zl*_c);Tn%;u5LrhU}pv<9U;ANLAY1Nsf!G~V^WJNm(YXJgzbV~JN1F+hARoWXNw34 zZJdR}7e=8m6m&+3o%FBy4R4A3BP_))t*q^3yv)+J$rZXsBL^+;EP2q1b-NH`KpbSv zp6NeTu_oyYTSo1-tgKWC72lGGIN3B>y(QIGlo179!`EmGBmP!)llZZy)oPqnPZ$B^>+L^33`s=_b--9YL+BEIyehN} zbH3O$^jTyquP?j^h<$v~d{oeM9E<9|BpNR%qdhE(9_kMh8dFJ+v)y50o(pMDi?Eci zrI4y`dYQDG?t9rSrqrc@}B(DyYjmgl0j+z1BfHg{ZB1K+)r4*>p-} zQ0sftUU>{Ww!lYP zEBlrHLuL~=KIlas(~_x&q_N%_i$@h5;)WG4C=G9Ob%U=qKGC9(Q^G39M$D-jR5uELNY&B;}Hop0o}t^pd%q>zg+?!hb!2@ zb+c6uers}B-Bp@Se^;%WAff}Hr(~&^vAIkI1~>GA5=TRH4|&0Jhhyo6Sj$0A@}gz91_g)w|Xn}&6I8xdQWO6cxd>IyfJJZeH9%?N146^I@9>h|%$ z;y7f7(jjX&dvW|5TQZXA5a(1O8TI29(Z$hOztlT@0{_?NHhy)i=8?4NtfFWIF|lE2 zHlFTSWyrWbBvRl$#0Jtq4X)f_y1Udey?rs4Nz#(FvTBNrp@r<%%RyKhOv9`bJdyJ} zG*HLQ|B3L_YC1s1vwr-|JxC#KecN|l1<|a3{kV;9%8yd-GZPFgogUvsM*&{r|73N1 z@P51$N1#C$GZfL@Y=33e6IQp8HvSgAm3uz|*?4F>B2+6c76<7a?yp1ic=Adt25^Cn zNzRPcD`F+E4=HLW5RW>{#7~wSCJ{cN+89AWJTnF4+w3;$7cfFRWFrFomvw1fDp{f* zy3A;9j1h~E`?e20s*rkbki2oCSHX>VQN71 z_atY=pMc>Ju7zq>vfxg^xGnrQE3`30sXF1!dj-g1GcM2Fz2`>sNEj4Pv&PYd+& z@UO@c?(#X2oJJ4pJ*h}Bk6sZk&Z^yHCxh+*70wmr;gbIM`MmvMT0C&;fF;%-9KA)W z=a!rz{4AnVmyZh$rJc?S4a)E*nN2onsB#Ft!D^{uw<)h;$S;Rq>0?SiudJ1>z4!iY ze7Y4Cyi*6bdDK0m5I8>n;T`eqPQC*hpBE$-uChYjKJEHdGq57 zsuc7Rk2*9gz=LG$TqJ`t{$nr$Zs0KGxHWxkvJ{dD*&ml>D}6Pzg`ILv=(U151EycPk?{Z=sY1bh%#f7}`UK~Bbqki7Y{L;rwdG`?l9cEprvv=Cc-e+WE z0?gqb9eN?#&cH10n-wLFh6-W+_p2@{^H%vI+43!va3dtOmAe0Yr^2L83AAvyUj$%y zjDmZICU-O2{k-q6>%blr6A3(m7ANNTro(aZta)pENs2g2M~9m|lO(aUmIat8bx+h1 zPNX624}=#!_Q;H-07M0xluCURc@#z#b3DqJ(9{q03je@mS@L7B!0_kv#JKHYV=Ex5 zWu@0u4K)AUhioBLobAfhB{>g5&`0G&oE9c$;q&{%i1fIYPov7FnT8%LCCjzZFCT+L zgIJ~Ir8|yu8aDgUS(9e_r*rXyJo8Ck)jIl;`HJOqMojtFQs6G*ud{wQ`C@B82{+eB$+=W?i&G^{Z0e2T;x|0F*mszH za|-8ueEm&(07)q-bsZy&)*o+A`u)~t$f=|{SRiRsLft$X$EFLFnpfQ$_cDY`6XXX0hBo9z=5}AEh%4X> zD9;eN;gqWwj>u`9;}77g-72@!pin2Dr?~mzi#v6#(ZCfkR|nYr?sw@@gD%OlQUt51 zJLaitJJs8Drb#gbS_TODP||E{5sZzDtV>)N(%>^1O+XZ%M2JW*o6a~eO#Kgg?-|zA z_pOU2fzW#ip-2Zq57kgadKIMC(0i4t0TDw9z4zXmH0e!5I!NyzpwbaUrHFzeo~Xb7 zKL7nZ`<(OP-g`dWXY(albImd4$YQR!));e)_q|X1QR^KvEKEq(9S@IA{DA!UX&^{8@GZo{5ISc#%10(1EFag{D%YXe8Gu%zv{o%l%Z zpac4);wL*9=v)a#vrdY*iy$EyO{WXGn(SUov26}Fqsi_kK8mfFVk~Q_Z2zPk=yBU!1I_M zsu_m{jVr>ve&XBJ_nFWDKrb1~(nkg*%;|X{br~>=fZkfwxdBGeXpGjh>IKjWZ0Id> zl8Bz=4(j1m+4^?4d~)p6&D!za_1GZ8hg8};$KWXz5#iiXyZhDK3y(QR#^T%c^>Ti$Dlgd-%!VYhZPcB7^vjB8$U}|WN zIVflnPZF4peRLPUpyf;OxdqprHTnxfohXTwTELkBI~u|K#6~!w$)QnM$K56)-v;ww z5o#y=3@{>-*yf90N@kK+&{3V*5>Q`_s9%qw7&78VU(@`YRvWzH>HY-(=JjZ&qxP6* z9V1487e1sFI%^qe$^e&t&4@LV_J39^Z{G_(JQKrwOtu}qGfkhMZSsw+rp?qwI42yu z8e)m73yb2?$tgmE;WhvZViYghx2*qg^`+?9^cA0(32QSrP9n5|qQghn3!wtEo_fF= zosz)iJ@W2cKW-R=lQXPdKW=8AUqGjxXa8b0BpZ!G1)JG;m-VhKq(onUmwjxO(*;dT z;z$W(FnE+hASt=Y%wBjc=MScHeHK?yr)F_ztDOI4Q@EiIp<~_5Xtt=v9CVK>jYCHO zR(F8M+gvnQPxwG8JfOe8v(LW9nKzHBO+1&EozZqx16g^e%9@E%!I5K1VF%dIbTdCT zX@rT&gvPceoQ$a6jUW8et4JAUHe64A`9>GOIzuNX6i0#kmdy((?5w*pyaKldZjsWR z?3w+@4~fB{G-%YC1pQ6cf_honsF(eb}{t2!KQd0nCZ`^%^W`)g1XHb%y(3YUeL>byb zd6nx21hl&NI}(^u_c?y>Vujt(qAT7v6g&Za6Idr@XozUAEFxl00CeSM$RYidB9;d* zZ_Q*3%*yF>N%%d(bbNrsP>iZLyNP}QH=W7;KHY|0nc!F20N6c>u@@*N*YRshg-Eg)$b)PmjClwV0a1kKN;8IwD7mTxY27!s0&sDsaucAfLg zdYg#w?~}SnV5p4vg-Mj|!BFIb7&x|(MymAD4GAAS6r z4yl1;_nW-Ry?+o0uxahTk;$hwxEUzcYPN4_@Kmq7jn*`(K8hHbwUN0D502Cs!M7!V6ot~@(lo6=ov zX;ep93P)K@hIlddK_sruO>8-xsZeRL@(}4EB}7N6{LQX5Bs~I1&rm6u&iB`v 
z_O9aP!4)i4+8pL_5|ea_US`E<6V;3Q4}XYXW~^sYHV{>ArL9avE%JD)^bz5dNGg;2 zZ#d4>`LIg@>hW9ww@omKxwXT@D(gTUJk<+Dv`VUV8xJv2--*j3WJ7j(Ze|(KNzyk?0 z8DitBBa7c4^egAFg?~B6rd-j>OS`548XK$l;xAFW`>y+Xm@hOv+<{$T`5bT_gStz> zXuc)Jl4hK@4}Jj_K4KA;*02ak?bk&Ky~+%XeF?!FV8u1f_!PODan$LMydiZ6)%d>j zlf!>TsEPqq<(WX1P^hE~_+H%8YKcW{*Uu-%T?##N5sAd9#43sa9sG!oo8@2n;K2KT z?<*Vd^?TCbUx2^A>-$xLDhtW~w6^m8Po)S5QNj7O?(+xtS^V-}p8wl;7(O`N_Fpv* zK3b1oc=#(02P#?{Aa-br8clwFw)eGaJolq@Cbm}(O+<#j@sn0?X4 z{_9lZ?vec^X%}mi{mIxj0{4FYL2&qIwF;79>6d#eY$q>j(dojkv~iaNSv}0n{3A$~ z*u>pdkkoWVKZfh}iTFNcBy*B>J{eyzlcWv#qkCsG?|ZwTzW09toc`Rle<@y9TEm1k z-AQ|y1r(|GZirX`boz>o&IagpynAtPgCL9D!94Z@1d%tjGRn_u7!m|(IC+o+@Hh^g z+F_lYGc6{zgv&c$|BWIRi}`~W_j2e@XyPwLuJqSNMfBOrzpmati}18JD-jllPqk>E z%8O94FGHhYwX3J|#YV!=m3MTj}>$ z`~CO)+4KKy{_kt@|6oFZ`J$?J^|FY3##9>seeBG1b+-MMz!-= zD92z1!Qn+Be8KvzSXs-$2fQ6;X2_iSAcA{@VKjY%BF<2@05M!m)u{@!IgDS2B?3i$ z9&^)>Bu8-}TO)!%{3WV4qwKes_j!2dl zXX=PiLp2lDeBtU^4CA#$p*@6C%!dYq2y$kf$;TyCWNMR%1Y0Ajl@@f2#K9Tx^5NCt zU5n>hQ4DcZHE)@ul#T8R=1i-ro4w_8Q(wM@i>vhuHS>b2})P{v`i#HfETuplfUh7LPX+@?ysc@z9XMyh>R`a$Q)mkF6m{~X@Huh4tWW2%+@RT{HX zpzk9Ca&N6ZDSJl}0<-${07j9}PdUXmm95Bq#LXq;b4jI6GCoBV2U69MnrBfVs*{N} z6`ojsNZe=zzYlRc5hm6UttJzh?&9KPKP$KU1yDZ}edz|6y%MXu9qrZT1TmXL9w);NA%)%gGP~9%R+eVk`h2f6f7p#4OHJKnV%TxMSu#gCqQ8VcfY49n zSngw`Q4+9=l^$kBH`IA9%fk7^i{%-aJ`?(5G5d&M?X0&eKR?Vl`Ihyu;L0onGm!Ry z2F;Pbad}LvR%}v^+$LpvP$YpLDw0|LkcUis%C`M_(ji+(mvT@@%KPn~#$pH}4li7Z(|tY<#A2TS}Rxg!o7b1ayevi3>Wt>I+enR z0*)$-FnP-@ZoCIfzW`xXQ$K3yovc>u3llhRi+v4K=kYx7mHED{j}OO!q!AK--lS zP{-ccAyN_J zl+c`BeW_SGv~zSR29?%Le%bobGe#Co%1hCaAz9%bB$y)Rkv+c=;qDNYl_a|2N@f{c zO8cOrFOxj+L>2jD;d|UsS6}iq>_g)BLf!mj6DIP%&9$Gf`7IP5=cgG2{387GG?KQ4 zQi3jlSWd`ZaiX*cI;W^{S-Xv!FMeFsyM-YY7p8v!kg7{}usE+Ww|@bkmB-(11p9oB z@!8CCD=qn85OS{X zPJ|F>R#*avalq)CV9FiJP4s9;F&f~==uy-0lETJ6J4k8!2Zs5Sux+mn$>CBZ z?-$GD7g0cZNzsZ@HYLpV2xLMk#@%L7xtg(6_R~x@t%+s0+mo|zuwl2vd@DYOWZhWp zt%qD~YK;8X6P-K~J_ujDjn)Sy3R%2H&f_@tFR*Pt~Tp{6h8f{;krredZzmC^KV%w|Rtyouz3wz>4uP zW-+?D!B2Q~P#=6k4Gfe^Okf1zGMR9g^)Fy-K)PAh$W)agzQn;cV3}@BjR-t~&{z<*|^4e;wjDpNShicLqO6UG2Wp z2NN9QsJ6+LPz>4_I7SpeE+*vQeA7%^PMn<|Znjyz6?-M=B1SqA8?f4&1(FcK78Tam z0kwiCJteQ2t}GlhH3f}p75Z-5#H`&ts@mr#r{WMd6&b_zcp;Er`>ckRtYQE-xEa?r zHqieR_m!q6J0&T&tXMQ+cz(K8kq&I+(Ldfcj!ZN-%?+q_Y< z!YQf}VpP6BS2S+euhV`WWR0j}qv6nuZO(|}KXEj8BjEo?D&paWyFp5?PlH~I?L*gq zID+;_S>dsVp*eE6FOl50+4~?SXh$ zBCEksm@xQ#$oHroGJY+|O_8jv=>>pMTb>DuJtbP(qt!fVx_|w!Yg;NJC7;C1Fub9^ zluJ8?a+Hh-8cOq;6h;!a)psRe4zjQ`W+YE{(aET9db$0FW$=6A@1Wy@7q%}W!c|+< zSY4_=CID^}vau_LtxLudwFU{FxL&P=c8|Wk$wkk?;vGYVI(YAO{tK}DZ{iPc+ugYH zpCSaZ4Oag-AZxdL5veM`Dq!UtsRO;?;{GlF`8Vl@pB{W(cMqmLId$)oEMOf2Oxoys z@V$h)Hw*m?{+(%my2T9YXlH-z03!)AcwVb^qwr>8(-=eXYpi+D6Kp#h2zA4NPsO6ar#z3byGmSt_8B8h zp6!xUtd__aTHBx+#CT$EtuOa8peP~lVv&HYI$*V&#sJ{15N<%aB>kbMdf<$(daJr( z^{RM$nj+pd2yo;^y*2{)DE>?XnqG6^bsTD&OBsAzjH3_>epZOcVy2N;$HDXXu9##< z1ei1}sC^8lbxn{n&|Z2-CC9z43m;?JeQpckD3KE{eMK7k?`u2>%#FpYUQ;nELMSy@X0(saanIY`T@u??j~l-HyhsUR$;50nQ9yDZ6nc2e}uG zAZ~X|KU>BZbhPcl=PuUe7w;PS6P)`M{fLEe0A9x3t~jdjdNZV?3c@D;gW_}J{_m(~g z<%nDCE5B#)Ly^xhU7OG3v^wgJJT;7WgJ|f5!x^yRIoW;6=jDSjcRh)FYsI|J!}YYh zWBKU#qeHi}>2##hHF<(GnxWtGilgUd&p!Y$OBfpBdf}$3{{~f=1p~zpo)_K=5s)7GBS;z@Xrm^THx0i z>9=lS3 zGKr%G%RM};$Ywox6-4I-4k?T&{DATs&O`Wk<5{?0qNkQ=Td7xyM~}-rt6NF4G6XqU z_&ck2cbIgFm0)M=?;hM3?1+ZzrC;pBGmC<%XrFqn==KvwEp~hQZQ?jmI+!%njV{g) zBN1i30B0P#`ldXHFS<=1A`fHs3Uzi~eWWtbnpU34jZ16CKbfUX1QL~xiSDtla$l?> zb}H(P3O5EIG^$Zs<~im?`w+LMw3^P&#?J_??O_?Nqb8lSLK(2o;k^UxX6CnaF_a?u zWW;dvy~+C_9>@K%{iNjwHKDaYGnxf!)|s$@+v&Z8S+zkfu`;)Mc|nVs2;qtT2ObS+ zNl))@TKB%qlu;EH(aK&g^w+p7+1@p^6oT+hhf8l3A{2NpB`9H@Gs6i}3A`>mgZagQ 
zTQa5btQeYnfLK%g(WS;@HG#}9WcAJ*G>Le)grMP)reiyHS6|L6#_7jCxY?kVNp*Ih zk3Qr)e_IusKZ%8dNQaQf*qcE@X+lp2S@q2rb_H7*H|Z?cJTp~v^K>GT%vNg%+K!4B zn#?`H&|d9ZnrQx(X*~g5Mcvr}M9;1@TeImK+@ZZWmH;!suMO2;;_bUs3vp>#z#89- z3gceSWw1+8T=!5uTNkOM7H5AA7_#j)Cqf*gCO9u76bUm+BgFCUA1AY68g`Ls$i1z~ z&(WcTgc{l&ZaOhUiUA$`6634d#$fu{!l3Ks29xv%A(=fgkz8WkVii=mjSH_UI~@aF z3cG@Vl+L`#X4qzwsr6z=Q!74W{5$7(d!UifJ(j`psMA=S;ywesoSJ>2cc3C`6kfc; zg0_N}wlOGM2)vOEu_s*cVx@md<}XG&=xXsu&NGClW8ry!b7#!ph#^02AIr}Oz#}CZ zy`-ply7C&+ifX6l{XC#b_E6TYu}O(V?KmQ()e86)u2sb&?(kA=6Cez)e@?>LcdQA%&Ai^JvSd0BV?O zyQ_WdSfA|J6Dq#4p|<67a}^W!N(6(*IH_|>Q%xtW{hlW(p711<@lJxfnx=^9a~OF> z>9fEG1OQn5V&Bl%KE1AVQ4AFEa_9MPE~ADIbXW3;q^o7DRi!wVy@#ranLCQ$saiwc z>aAyqja`C{R4+(B!@rb!l+u>SLPBM|?AboVj@o2ey1-dFEU(4whs5=kM`=$0gj~J) ziZOW-0_@?lmH`V?l2u~7E$WKNj;tLP-Uq6 ziB@gNTGZwMnTcIi=5C`(649KGeD3}?=G(MG`&v(i8mzl(CuGQ}QwH*N33P9pt!vny zhQW7ClX1`SSML|D^8^F~6}C(FR4yz#HM^to*h#}|?fO^&c&eB6fA)i=hGc#LFzgJV z#Z8n&E)T)CsB}Xt^|K$(L%TB!h2bZU5N*Vi}2yb&XyUG-d;vWC`2 zk0lvUBzET{H4hg!p(@(@G4aBCY$oITFTkxo?-ypV^0s5bnl}hB;7J~$N^0Ec3tHeN zZ~b^z6c=UmhV^3Tc~Bfpo^N@hn_?kG0<$!#E9PWlylX?mUkV?rgTJ_U94FDn5f6Eb z>3o7mU;!yH8dG3op-iYV%!QsbEX$7yK9f2X70&nOZ-kN@DH-2Qq6iHmMjX)JDqy7O za?$+qWcJ(dhnL+FUg9Qkv0Nk5u&t!^FF=vp4zucXp>1HBVKv=47AF1A-eLMQyw}sZ z!3jKus9&f}B>N8#NK76P3t&Ycp+-eH85-&+x&9 zp@ii$Nhb2>{e11koN@SV1;^9b!{@>G%C&)npOmv&1?>n$hwe~lXh@aSUZi{9@XnHF zQ`m5G6>JR$$0+nNX|=tBs+V;>^rrUkZncF_S9e=MRBMOWD$AtO*z-!<$H5$O0G<0H z$4$*%#H_7+vlNvC#W)4HycDMCz#NqVf2M|b> zbb!*4ma@mI+$;UW9!G5$&%7yeFo)$HP8#>QB zV79{tgKOB!pnK*#=ClNdm2SAiUc;7C4DAJ#*G(oV%ls$NgU6pSiiZG}Sk2-z+$+>I zfX2F|?JX_%aU*Sxyu2rw5IYIv%jQM1I1X$SAC96Ro7S>n$wvtwZ z6bmddhQ^B};c7cQZd-WY2A1z%$?S(%QH92ojD$pPadj5jNWkI*>+zv!FHA{&l*jZY zPdgfqk)>V`2?ykdkj_vGm`^fQ(ux6_Fe5Ieg~o>dMq3Tx;E*#dP);Fb6|NyaoN}(( z`ynXwDR|ws7(52z-Hgappm}m0JkTRU)}T(uZ}4bLV8 zYI`s(o}~iR!&%;Q6sXD=RACkAWw#V##UzeFj)hjp_fhniaw%X#%0auhjEKHG-M+L2 z4#}>q!tWnKulsnf35+S)Pu@@IrD?$$GWX<8+c1E&>9Fb+K#1tonHPy zQVA~;pXydooFQ;$g9;39v6l-!VzuQ2Opfx;>SiG08QIQLF@n0-f)bR(%F#=QH25EQ z&Uh#>BY>WD4tBORMYXXEMVow?&{x(@RJ$wuZN`J)X~jhfO!LBu3?7Q3`{0sZ^E4(x z(-SU1`YIDAfKLLz>%bD}ETo`+ZGd6jA!e|X&qCk(JA>B!L-=l87{&TzNVl*kxyve! zBsv}GSVk58$p6DLh%4`=(sJoZQf-!T>)|U;fuGp;xSCa4?)&Z!gI5Z2?Gm+gr9iYo z@~gzPI&}-vehDAfS_(7JRXR)ZauuXdje1=sf?d)CTW)dD{ttMs#J#I zYGE>_oqQ6-E)E);W&M!@U;t)DbPyXG89XyAh=kd@p+>woiS|NV64q*|q*61JpkSIr zXh^4m;iPFJUvgJ57`tBnanyeSOqlLqUUm7U5bm_5GI`>~#XS+ildNU9fXY_mto5>r zU;vJ_(gg6ANcm6L$T!60oq*3`*I0-@%%gD^ z9xkyAtLcLEIjEIZRbPQ@0q!FmFDU~DKZR_iesGH|H|qua>yZ$xhKw;kas8MBhDR$9 zdQe6VXk1TkLAe{Cql6mz4UgH62$1vh>0ceDwCqx92p1cEWr9fp8gE%AYQE*KunuK- zgIj}nrzv=;1}Q6l)U-Q|IuLK3Jyt?PYUJ+PvTgV(%ITf-SpDUpK4<>@y#4&WFYF>w zS7oRhQIEE52bFEJCLCIY4Q4tIkwsZJJVZ6e-V-k;y($ z-DY|(M#5cRk+~81n==Q~eNgO9fWj&n!fpve#tJx?0(x7eZ#8Yd4x!#CW@Zh2;TKCP zClJGp%ctAnYnwq+g0Rqh8u39Awaz4(tuU1!l;ika^Rqs$?4~?T1}V-XGa`pLnGuH7 zSYzsJT7jD&p`5Huc%lwYGG!kNj!A}~yO+NRWy!U!;?y+EHJVyBX=BkZtd};h2!<14 zQWCRq!)|wcRX7oHs&aMxc~M2tiZ9ThVEem-w}a3 zV3n)aNE0rwiCMih#$#}nK!Jq zqfUjmd*t#vp5MFF>(YPCI`9+9Eei_>SGrg z*|DZJ+L#tuu5}bTl6WhZ?zIiNB)tDi9((P*+SYH5g6LpebC3{JBGUf3qHvKhLx?i! 
z;p01ne__%i8jP8~gld~o2Q(<9QO^K(v5Bf>)1x9O2p_mM4PO7V&eO4UUY&tP$Mno> zY!0hlX88Kqo(8s8$$TZL3^wsp1MW>u!#+`9+VeXNm44l>Pp+TZmDv#6S(Q6(xHcfA zTqB+5^t7txk63x+F-YW(oLaMyCMd>rGU&Gwk048;ZGwz>Y8R0Z6n%psO^-ZycEkDk z@#zn9C^0e9i=a8I8s5&++C~R!CgaOj;pV;CO1g0YFKXO&u_tuoc_E}EFLYGHqv@~) z8x)UEr$jLSwuluHI{2C88?M(WRsrn2?{?*%-%<0~iStj(($=UKOPVDw=JdB6Zf-bZ z?MJ_Vvu=DjHuvrHENkl$+n)BsR=a@|G%K{fbNP?QM6L!~;ikZH+QM zQ3}o{i71v=n$uP^Pt~72??c)sj{R2o_=`{O^lzK0M}&opfBZ@D{P?{u8S@K3|3QUV zm~&%W7fo7Ye7-}MNgr)HlEacRC5pcizhPP|3Cr7ktoa1RS1apOL+f8^8Zln(#i{`N zLey@!buq^W|2nq4Vu#1e)HRnZ4aRxnLg3s;Ww1I2>1;ajP1&_u@Xj-T@j_wWOPC(} z6kv}!?;$ygJp;bMdx!+7m#0C( zsw7vY;p0lGG>Lzn;ABbx$T2M90iXbbJM0*1Un&M(`*X)7$OlWDg$0;@ImZJ!c+MO} z1V3%WNi~8Frg~?_FsiSwqr^}p99XeRgJn$nQ{#C@=@)=4lDJD~dvf;b&ApS|PhH*b z-im4GUCTbY?(zIKKEAZCehgg`tB$Rf!n1XvX!4d z6XlTKgz%o@{R}~G%X!$P!f)Q}pq&tsds3^~>9*_lL`-nJGP6c|=vij*<#RzxR}04R z8JYT5Wrt5P$KQ&DzEN(Et}JVZF(-12JQFcVCtwbohigD&Fmm~;_*(c1NiF=1n z5{3L1_pVP${Ps`I-kqO4?6I_Lm%2MiD;%4NGuXSbG+1jb2g$RE6X*+xp#=^pex5fV zS;Rmgs#ovwF3;a(->gy0TObq~p%hJpmW0kn=V?9&GVXzhOY+%guS>wVV&s z&sUMtW+%)#VZN8>vpn|x+SwxWo{&*~td)@B*#l(vLf5U#q5z9jbu65ZP+C2xVdH5; z23;b4=tNR4lVMbTsHN`Imk^x8UL@i%34L^0jj0SL+jGURjX1S(SRaSy?Ie~fw7T`z z8EE!%gicEZDRKt6PtDcR%=+4NozIY2#0C zQPN}TEM;#`o4htLAz5^we{K)_2&y--5exW2D#m;}(Q1!9aLAvCP?$%07jLKfeQE+& zw>^K5AK*6pGnA;MZ%|s@S+x9Mi*f>w)>ktdEV`qy_5*Ei&1x0Os;HL7p5&Z1ABSZy z`^}pM^K>Q}wQ>uO#`39&Vrk5vS(2^);#Bip*$h`Z?fS#oCh9({(}DUe@kcPi{=TaE z9|ay+cznh4lKfu(bQ@wn;ZVC4_JeQsbDhHu+I|cAi2%zddU+mu_ca| z&CJcLMB?P~P~Mc?q$^c1`xdldpHh{kvaNpgxCN=ixs}P6f9$jdVBf*@|5kpkoriJ! zQ)%>nymiMymSX!-LI51oyQz!7Nq1u&eeCJ?n7Ghox(5sE}A zXNiPGZHSc7bdb;-`#va|^o_b1VnRpSzx)=ufrQzN{S{!9ui)}g#-bTvo7cEsBUs{%q1aEXkP7>J z$e3(Sw_7guKvAIj?xozy#IsxX)O4UL#;{4gi=wSgU>8Sxc@qd~vu} zJ?bBmJVGeOssH{b-kgWvh+b$Y+N6}yetu|w8%KzJoPW!{Zc#5Y!BV2FlV!XN^!*_0 z!`^h}og)(W3|oh%cn$W+N4F^AbP4BXnep|6cbIXW zYWe4_gXD=R8&4=+sB0QbT>shDy`~1MTk!$v9JFkn)lyJ|a><)4$R#mznq&!q9KE1XgacHPTa5I>CZydK>o1v-cvVp_%^&J#)%q#m=F_+TXJx)!xW#n_qiFP3a zl)pLl{)5Z@`3Kso+s~22&O_OXtRWwb!^^BoGPVM{?uTE6$Mn7=DpbuGM*P5;18o@S zUCahKB#0{qDcnlcVCLi`vOLmZ__NHg=u+mAv_L8-`^Le*ZUj=A7J)cM4Hqk!q z)Hd?+XIr!gpx|MwA?~Vzj2;8t5;Ai$tfqOOgwjk0CyDK|YOE=h7jh*dA`Q5R{2fQ@ z=UVv6mnGjuL4`Sd53VN{Mh44JF0 z*Qbm1(X=8+FRMLU4pGKHIRZ+^uJK-roym!IxI^gWx4)l*<0L2s3Wbj3v{n^?%hFg%3B|NAVI3sa- zyjS<7G<=JlBa7lo%Qr`9$Bs<$vU{Mnt{jK8n`TQceZ>GyV$ky3ZKm70{6+05=3Dop zd$=#+F8F}xZ73|jz!{$VPgDBb)|Y?2=Wf}*)O+R|q;+y@@Evu~UHgxS+uRp_PO?It z+$a@LMGF4WlepnRL(7T2S&{ zOI+`#)$F$_pFwZxn+R7~jQ^-{^zM##R?s{gILLZ5`PWvh6Cz!``X`E?%q}fhCW)I4 zb`R$p-FDvxdyZ%3WU8EgA?Isyk$SfqcL2eOlz`FFT^0~#f7#GbvQ0z8S0u3$5lZe{ zG0D8Bv=V*ITp%~wneUC|{}sA>^|z*e>T@j~%fMRe1dY;^1d_;i>5S+`TH}~yB#)=( zCa({f%sRVzAY;e=mQVM$wO!i&v4-Q%UW^A44f>z1ydIoJD51T?MI$^=V`knT090?a z05}MfZ-7Qnw-?STyZ=z}bFU?l*9UTJ+Fwh$ykhB(czH-L4A#f-bt=lHvPOn5)X}kR zg4bry(60WlCwqfYO@u$w!=AEb_#Q&;pfUZOj%A)uUI{{{@7`R&NhV@FgYy`}md!}A z&IFod(7SB@1&Fv+eH__MY{I)pV|G(i{(XPG5*mID1BK%0i`<3rjn0nUsHav&-^`Up z!q}zg8R|GAy8}#>ORF|E#7o>LUWCrj3{IzeId;em-7G??k5n$#wOJEtH(v5zk*@N+ z(j}!%t|M9z&zBtT( zA{)b;!<73$O#8Y-PvvWUI^&z19>KN=RCd>&9<5*z_ANNc_?sECroMdSeh;;zc-=t| z_>NIHYv%ZU;SPNBq4HHg*PYy_*D^1bjdM@VRW^%a#j~J|xrIg*OQBrxg?-acmM7m{ zqWIGu^uK_^+-5R)*JtlglZ^z9(1@o5L{h1V$Vx`cCLmV4C8{M z6eqmD7ooIYf6s3|mv%aQFugz`M`Qef;sZ%j<;s7weJre+nW5eJ{_p+vCD(L{@TMQm zVP!rRK3%(`-xgd>A!h9OZ^xDHKb021k);QJRsZRxv%wpdvWqKo)8pT@^|lcPS=s+f z4^~c*eShOI$1O2Z#RR?Xj!?JU2;a8g5_KLEQ6$&IH#Jft+qcqBqm?$!hwhL>{}~)B z^j9NqQu}Qy^?z#%VJ&^slM<%cPWW$)(ekbZ_saflL`w z#eVc zz(#I~i=H~|SVc5)p%;g1TY?#g+VEm^-&2@~11gU-Y!d^k;3Em}s!L%U4{Yv6wm}a} zJRb*nmf@ZHjy*dft}heKXAHF#siH4;Nc0ckN#y9S^QvUn^)7&QogBmztKa4rJ|u~V 
zpXhWwG!k+z)#}WYhNZP4=)zG}RCC`*hMi30UcO9&=*(r(+)~nUZV_a&Xx8u);AZ3v zEtg=;dCRrwo4N(*wHEfth*~7wJ2EDBesu08&*X5jsWS>x+et=`z3FUk=+UJ)u0((H zFheg6RD4H-t}L3cUTE`E9luiFFnS=?4pt>6o+7m4p^i!}d7BammxyGG0k{{`LNl%I zksM6yLlDh^#d8H-I`GVd%D;iE|H9+iJyT@u3>7x6iNUw-eO0wbtqlp(gbIy5Ii=n9 z(;eUlCSqefmiZErQ)~Ztn)&Zr$+5#h3F75d_*Wb{V-t^)E)H8|`CupYw5*KT@A^Uk z1y1J~lOk=RR2Fn|LPH8l*A9j0Do0>CuEC4~8O8JR=Cn~7l+i4Gwj*JQW)_5tDZxaJ zM&rN+b+UK;ls4D!iwZ-ZE^^yJD_z3iwJt7qM|jqfZ;JcVri!s#Ucc3rT;4z5^FDR3 z*K6yW{%Llv5}@nDcGv$SWh^K5Pp8leUBP=u>c6`5GQ+e9{^_EzQlBFiY!lu$RN`LM z%UKP~X(lBapcuNFh?6*2DoJSVeQ#q!J60w2uc*F*s>OQ&N2~$Zv5R{o++Zz!f9%tD z(0JGQuhAk(IX4!rzx}CpevGYWlAE(hDpLOb2F4s6CS7y*(0Jj~v#Kq4yn)etMDVQ! z`dj0*blFV{ys#+=pO=3V1FwUluNBGY{H#WDOE0?$x5wZ32L!JTNmW$cD*B!;xfQA( zE#U{Xe$VMD>}vM>#z`V&ou-j_@w9bDCzdY$AGZ*^93i%tP&_0AkD}tsB(&Ovi^Q3D z0dU1&CHh47QrTe}FD(ThnB7Kdpx=MEi~IgY(4&GLB#Vt_*=Ot~tjYh{FGgjSOjHOB za-pp6lqqH)6By**0oQ6g;p8#t*nm5`J1+*Q+3O$fHT0dbXan^yUgpi;C**$$88|Hw zmHxvgb@h|_DuPMy{maB#s%QRpMl!n&Pk-7^y;9GpyU@-%r7nh-7X;mYMOrfXbz;GO zrpc7gnH(VZC3=D3b_1?$u`z<6i;Nce?)SXR8IQM$BD-bXviU;}Kmwvn>E%d)HseOH z_5chk!kLN_c2&t#Yx}SWE-fg@y)SWkjvMX8Xd7=d0E{QUy?%c7;P#WOC`UPC<&<5> zepjt1=JVT+kdSO2GG0m-4AVLsFl3uR`QTUq)W%wDQ%!GQ&5lNn+jKz6Gazp&Eeb&P za~qNWn=41^hLlu{u`O|V{6wc{Pd;(tC`YMYtO~T+uwr&f>sqjR!TbKz0_F=|!R_sP zw_KJdzN%LK#sE7gO#SNsy*@`@gXFr952L&c{?If1W=8rC_M$&LN&l}MQG^Q7bg1s& zUSp9qv3%_R#hiRXYG)AC-0hq%xnXriX*pc05vD9(eS98m<9(BO`fK!>8i00l1JlBC z6Z=T@{6w%Ftu#bPY}Rd4nq;!J+MKe%WXzhNXjyXd4s`Ja#7$u0AbfXmF&gI3IdzNU zRxvmR&NCf+!2s|&eRw74hy+?OoATRS%z+FJ>IbNG32`x6_35`ZY< zX=O*<%IFEIuaZl36F=m0VQSvCp*UtJGB8+)29%-d{j=l?42ZAwv)9a5~ zq78HO;NP|3!DBm)`762y@pr=hlsBW3Z~^P#$Tu7@m-Xwe>)CdYj&bRB2{TObX~TMU zDO))!LR0c=TH6R<$Y@fO{Xe|C;ywYTzcVs-ox{n-dpczY1!*ANpLi(w=?bi;w zpM?E(DAQM3BbvcKP(O|7dg-J+-bJ2ih$Y;mr^Iov5lUM^j2A^8uPC z$tx5Hm{G&X$p0b;IhNdCeawp5KDk~I(9uCj6B*)&+p6ps8{SWkXI#(Jym%sOc~Y6% z@?q~0$Nc!4meViOul@ox9)0igv%4mKu2R_dXP=<&37H5Hf4hp@EulWVHsDqtPmo#1 z-Si++k188_p4#gF(cX6lMfEKI?gG2y49kK@l$@5lAUS8rStRF-0T6M?Ip?GxIcEVy zV97`j5KvHb$w`r<0;0Yp_`Tly4Zm0KRlTZLb^qCOW_o7&GiT4tnd$E7o=MEW>+-X3 z;_=L0ybNc#@kqwTjGBX2ZuOBs)?OpHnWxI&whiTUJrB)GkHFP;-$$Q)%eng%aNLS- zS!Kf%r&OI@FK}*z2CYLc+Z%GfY ze4_DtdM-+M_WQo+MHQv;=R+3%Zjp5R87lTynniqME6&QI$@P5}^lh4Dw)n;bL*CYc z!*^|Hw0FW>s=c;(m5BqN54sAN0>6A*k|6o5ZxS z?`dFjc=7Namxys&0u8~1%QUckoCF?zJjUdPo6!_C+Qmacnyi96Yfy8~8^)UEpnaf~ zu{7C_TKPhlrQON*!fU9r(*!8%6QJzi>j{X z6f1UnxE8<3c!#V+Cw(gSY!DG&k{5lQf!Z-pcj^w0sW%bWW4s=M39A0ebCAb2 zeE#M8Tdl!RoFQcdmTtOi0L~6pzrG_*hN*Mn@^#y#P`+L+dUH>y?c17cdM6!!GitYw zyM!}7f2-Tf`w0+>66ii!cy_mI=j6N7bsN>4zRfrrM_j!eV|++1uHL%KoT0nRdlFhy z)X7uDHDf6naaIg*4}5;|(Chvm$88fXwXWku$h2bf#xffFdfqE8E1!OUX~MV^9ZIu0 zP>N}p(vhtV?#4X`Agv%&Gih1V{d8c z*FMtR$NpQj%)Q;N%CDB%G`@VGQ0HR09WhfJR90!pjREu~u{U`hJ6C~@(eL4RU%`Hw zpuWR!LN!}H*_QXIg5L-L+N3@c6in0BdN8ZVOA+mN>5?Oqj{*6AJnaev#4%a41wqG@ zn?{&Kz$22-$Lj;K#bENTX-SDkBw+#G8|!oqjwJM|dM>Kx zIU|l1H##1ym3c;_TfG-e!>+t$}SX8fFmc49}S10xsr5i*E@6nv5Fo1(P@3H z9-joV0N*z6+e|u&WV^L{sT%Vk4?og_kiP>!=L7QbK3prQ3vTvxB44>Eq%$)^*96sz` z@e;dVlN-5?$5QQBi>h6!n*KNW%_g08luj+u=DwDb}BSIpvB#bR~ zh>_i^m`wF*^uR<<%($@@@oO1vTdN=Z|6$!7W9O!J%c@phRvZgM%%rb|Z>rOj0qDd> z9^Ju5O!rHtw|_e?;DxRKsasWxtfTX|!aq-@1%$b zb%o^tpY7|gJZ0CJ=kvr)0=|9^AU(Mmpp_yrGE*DOM&G(1WzN#|H0DvZ_|`|1N`_>g zCw9AXa4i+X7pI{B@wXdN7HhRR^;_SqC5LZt6&@AbzAo|1^fphxGvd)%?|t3%cT{8@ zMmL;1X{dUTCC`?rHH{b+>o#-guDBkWWv^}#+Qg%M)D56zZx2`YzW(L*;eNN|AH6|g zSdJtOzAgz>R&Vd#Va23{E2}gFI91x!wcQ#mW6n3b4WX_LhS^7V#|LsS)ge zp<2d&h5Qj!K6NE$L$|1fG(%T}xg{6i#q^8!J=J3=E8G4@PtiIarGt0t7~ACdYp+6G z*;5rStJ7*wS$V=Ho~j}>>?3vVuv^Ehzo*>5uNk9uYy?YO`x^rMT<<=DqgnZ>JseXLlOI(Et 
zo7RdXtXd%9UXx6Gl;1l(71s!akk<8h-19o?6f(^vq!;yDieuBVmt2`91aTii+}7!y z+ZkajW)mAh(&COVFV1|-9n<|!wX zXmtT+-8s}8+;|T2rH2w;@R#h-^ZsZ)W0$OS?L^)DI9k-f%UvI7{;?PwY2xO5C%|y<_gNp4~lh;7XvqPM!Shobbfzld%i!yx@5~{+n0&-_u?c zw3&ee*E5Y62lze_Ij97f8|>O|WNZBdNPIylF75RqRn6lE-*@k-CQp-(IOY7S9c=)~Yu(%b zO6xUs6l)yjSM6G<<3Mdpp^1XVH%aq}Xi`l^J|h?vEUXvnjiN_Wj?;|Y_?n9C67RN! zGE zdI_FOoUawO@{eoFQ(0BanFap-!L>c8=hH_&BYWKxDGsDv%wFzZ z@_gfcK?-N46U3}svRR)f>y9l0kg~52hU0WToQmtypE0dm}nE z>S;alF_5_{*Pqh(36HfubHDkJ&*#-B+RCK&q`KN8bs!eFdhc8167)~xMc7Xsj}~RYB2S zkkix3*UI}C=FS_5yR9HL^Kh<{-7k7j^O~OO&~`giL8=PcfC7s*wEC;(_l{%W-L0=* zp|bOAj_etlgrv(xAF$Qz>EkK#^Vjh2RtvlgA@Y6S#G_lx%WY;PUbZyMx;&->zy1n8 z6{Nwc#p~)4^{IX{fYq_S7~oqr-bym_I>Z#*HsvbAiX5gWowX)`6cO0MQ}El2tgE(G z5GNIMPFh9Y6?z{U`l0UYSYKfbMzq)p+N&m#G*~rPl%Mh-Wq@Ym6`|~xOecqfciLwa z0v0N~xKJ`*LX|<3M6T}K5c@4a=~Y-J^&kZt>?gMxChJ~?)ihy5I|Cta!wnpNJ+}K+ z!^>cQr_wQ?MB9aXFSfonB%5tnkvaVN#vSXu1tTgYdO;SAaOQ4-by>&@aOx2PNMW37 z!vdW=6a(8!bk?5DEdRL(Eu8pf*)R&10x=)^xYs-ANkYFlF=h7AmB zm(=wN*aUbIVJA>X(g@=ri22O^*e?9_=1ew|%aBE#6=hlL>+i`I)f21DIqnr!X1HU} zB2a8-?A^OpL{OX%f^ej}jQRXSwpQs;Kn9gpx4AZtU1IsjmgpY8y&UZ`W2Lf6i?Ehn zfI0??UoCcV7a@$Sn!;cJn9WfE##``We`Ss=03T}+LDGglSrU;wY5!6d%vr)t_3?@n zAcE)$uel}*b1dkv&{$zvyCdM?A3D&(dmhrL&CbYT&0->~jhIj`RP&e^c1d_*M7V!2 zWq1oFV~6>w1fkgQg9SbmwOLP!YFHI-=B(=$wr6oSpLSq|0bU4qxg9@V zuV}y+SM)(wZ8yEni-A*V?CLT#JEZEV>sHTn*7LTuK3^$&3RXvC4tOD)PNz$Pm@3XG zGn;G?dN5$V2-F=WV=6`*qzA;*XoI*V5vTwnUU#vIu9PP_wpgD6exLEmX?RXDYO_X<+4uabCODt9`@}PNpCrqWIn_P#W2jh+ zP6MYAy{b+1#5Gmtr=IsB2=1P79dFnbXZr;U-9D%qCGjAIiW+4(x`IwhE=SP!X{da3 z`JfNkLq#*p3)d2`F(v*`WbH&d%#ug=sN?fM*(r}dx=|W;XwAJcE2k0n;ylx_J8Pz&EWWKRCQK}iX6X?27m9#% z6ccEX2e%|0MMX&@+A?;8&&qp9V1Cm1hAg*~8nq*u$5$C#B57<5C(K^1SDPtDHWZk@ zG`tUcj!*E8v5NQb9T@HjlnIygzEq#Vp4t_ZyRP8kRmm+n%s%(xDHCLMs~Raw(Lroy z&hRLN2K^b85K0Rsy@_jX6?Xg_QHc*Sg!eD@7?~O|i*0>=S>S1`pI(mWZdJe$=iyOz zdVaONR0iM$+e9jwZVOgxHM}}8L3n%T^gg=lX#s5IT8MSwWHnCtE5#P1gc;v zcIdyXi_Hjl!XG!-uHxba=DKh5mxIE27vommE2D^Li5?%mfM=^k2mGYtrOyse4XPyG z#;D)?3BZvnj2nvvnGy+Xzln-oUTnv)P9Z6$XnW%&>m?)~A-Cae*-y=~D7-uP877mJ zo+16?3NBYMM8@~r81;YHsjKaW-W!m8+1}Hg=4)YAVL%^eOh_&=wWK#7ACZ#phlgjn zix@P|^jz#!Q(SN}dE@<-t|V|5Ap$<*dxJpzzrrLz{`i$X^7DY{S86lGN zeB5$p3AOk?NgpZjJ=u8ng?U+zwYDlC0%4gu3KkiM6Sh~WGZK6!#C@Izb37$Wwe-o< zk}ADC0mY`I%Tax0WoY1ZN=`3#CwKnUdWPy7x0=|_N*|^i-u1k?~4b)jnl06t`CkMz- zK>t#H{k}@?!deu=pT*duF19^2lJ}XR(*pvhg4lqV9er=aC=XbPN1zaYFs!RWf{@Ny zFa$VDvlc~SZ7-5CW_Dc|xN97)C7M3WT~|x1Zfod?`1nRODA0_%p?{oa+R3k~VVJvd z`Oxa@h9{``-7Q}eHB49{j0pG2%V{QEeh)^(JAyi3=H?3Fj_0~Wx`~ZrE>W`gi0_Pv z#sD`<>M>d5)EiRBgflW}h|GtE78Qnv#fluc#eb{W>Zr}qN=A%kg{%*G_29!%4K|0x z_C9{!F#(~(jEP-3WH!t1wV!LlFbu_Wi_sL%;K&6H>lFdsSf_A0ntq2d)8yzBpi89} zMa}&k+-~LW8E|UDQFpn62St@|7z?DC+K&@S#o7HbS00;NDz_`2HZf7T0&%A&*aceP z8Nu?J45v%RbaVh|&6*Vzs?C{g6YJ*Jy!tA~LJ;AIF-Uc(tc5jSnXyMBx@rh$c_4g%wc3@3~vc2U!pz2m^at|H8z%p zfH|BZ0SgDvgL_`t&!M}N07TQQQQuM#Uqf>JvlHQQ9fDB?z!+T3XFAA#L-oE22}Wy7 zNW8oF6)~MG=INSLkfCRudT>We;L<7Ef~BGh-g7&Tso1J65^iqKw>X3jPLZQ?#xiPB z&U!Jon=hzLRQmPo0Db2cXyd~28zFUNw*f@5r1O{{{2HmH1}rP8dbA*jNq4WsM7Oqx zKX%qe0s5uB!70GOP>`DW%ZQsYC1+)J?SG267L>{YMRO~$gLLJx9(vxb3tX)_L8}h5 za(COJr4mC%Abbl8!wjpA?U#W>Vb|F;6lo(&lyk8!F?07vumfN^*kKQ;i>^Mpf|>g1 z_~_|-zTZ}*nUO_`eVNhL%Y@F>(%)0SsW%G5)V~kLV{oAGOk!_gU50=V4xTw#?aR4X zWjkca7yfr(ZjhfdBS?0l-|3q%Wd$q#=v#$!v_tMhfD(>HILVL z1>xE<8+t(~uOLemP=0XR9g4HcrMDC&NIAPb6kk6cHZ&((va8xI&t9fGU`(kJ3!OH+ z6&u;a+P3}H(B%w9`Mh?1*KCX`n(H}E{^i0^bd9sLLN-mK3y0VR$UFLKmYjV>7c{je zfVk#!0W4bWVJz?yK>1g@317|=xvH;0GRi_^89}tg<>~yNKzF+UmY99bm%*+TgqdRW zz7Y2qOH`1FMz_^v{CN4D=U;3MONot@s4MQ;a9u`;bxhFS91zmcQVMqsXo;CRA-r`z zjU%|HV3+bIV3Hg{!L@&%PctIG8DuAXt+&IY6g6ol+~D_E2HvZq68HmuCth 
zl>!{JpkC-lNCM^@J}ICBH@4?bC$#3%U*g0x$a==Q5XLr|QPuT$y|MN)W|7GQXW4;NqPXRSZur<#WXkZNe|*x-dE zwEa<|Qj){Nv<3`9W49VsxZhG#f88Eh7%rV09G|Hrez#NaQWsZtx7y@mF8Szn4GVJ2 zl0)y$q2f)^&h#-rC-NW9<7u0gN<+MK4B%>;x5h*vu- z$YZe_wA%Ceml2l95Co4;gs?9T3`)H1FbQtt84eCM#i8m}CNbb=tc=)H`x<0Bxh$&O z8xSj~_cGKA>`iuSbzK$W+GC1CzjML>iSXc$;k;?J23w#BjaDf?rEm}I5||- zutFbPcc@b^(Eeof2tm|cZ>!|0Oq#PVy6Q3VKJxZENA2i zMoSWYuXk*bK9Tv#n=i2^^*m&uK$`Sck)a#k-C@O~mq%-IU-gB|bAhpoHIGi)WXC|6 zl@M!q*`IV1;u~0XzmG|(ts)yotq}-d!!y0b8X2h`8xI~)Kt9E6;$}CZK@B9?7ZwR) z`I=RpA$3`0>NYG5kH9K>?OL*2&#c6*>8sc(x|hxK)amg72BMEvvQnS|zCOn&U+n zxbUOArO%8(PEQwnZkr4^xA8bGGGG-cYblZuCW+>c|8Sf)`}u0)<{gJAiVx#hDOZhv zCi4&Vo=TP8zNhu!I4yuS+iByRzWt&cO?mLuPO11-ct^^wB2nqDX1m4hETNrc;A;MQ zBRGnIbah0BW@txnyd@Q8BfA^5YPZ2#S&1FPHlv_9Y)a#gZuk$bn87>X2YSh0?bILI zDA$l&c0ZsG7bGeU{9>Hyy)w*R8yV|UqC3;U!6e1q48y<6gv0=*Y=CDL_83~ z)XJGnibqhN{I8Km4Zy1*znO4}(Prb`JPtGs9%kDs#lVi$if@*UjdRe;9|c5&T((Mz zrDM0Mml?#F)OHNs%8M9O#F&X1o~grSb4dH9^Vd za6(Oxg+$hT9>G5WQArueRBCA}1wLGz-HQD8j*>M4&p$JzGPuH1hKfRK?C*RCQyI56YAtznk7+q`WWp;fbGr(+AT-pCr5} zPba~MVcrI$2G;5>+DWr*+~%?ZZy8t2BdVRKM9%j$ox~Ak)IELqG5dn^cNx<`Kp9{3 zsXfC;$H$g5@c^8v3|PJv{@5X@Y-{TB_Yp%@3}4`8!5AdTZWD&TSIGr`9ze@SMb7Ti zwUQ2ewVYuhm%e)J{OPCBZ9|bxzKAke$bf(H0SFv!X5Qec zB|5%hocx*P%MeA78Y+dFaz3wdQ`Wi)_4l4A*?Y zb;`m0-q_o3d2L^>`fNieaTX_Uuf6@P;28FB_IXa0@m=W4S?~n2F#j${h`&Xpjl{n4 zEAJNg@~bZbpIfhoj?lak-nVKSfcNbU%F@lDbd<4?CJ_P{GG63gv|fM90Hwj?HM}fz z>YOQ2G?>|DWnkx+r<4O>YSv+mpX`6fK~>QhjfV3ZlEoW38m52XJSW&ykW+E|>JXY1 z8`d4nxqRFFH}w7W?5i`sHFMhYXT!TL|K9jgWyNHTG;(RhR>!oE3Ny5(WAdRC(-@Ck zDwu&?r#ul_ivy#O^3sK=yKb8M5BWh!)HrUMyU5BgT3eRL|88j}XNs_qs<%Ca*aj}J zNJNM)%ey{KJ)|#7=Q#ozk#1-QFp?yUy@FtRb3gNh-(}p(4GtNa^^VbfH8GN4Y%_ld zrzacaS55iI--_VS?@N=yw|*Hf6Zw!EN0?O`nBmQn)7STr^DagT`aA)hk?rI^tXghd zgl+nPMWo-h2`ppIl?YeKgtE`qZOjB~WQEgZ!r0XGP>d=7ZEerrEq&s!Y;Cy5oW(jH zu`xp0{8?qY5Qd!nCc`-Gd0LS<>2le=ZRuO#RB=58+8BsH7H*TGKvsW;=lT$1d4A%| zF>&1k?;aex#qv&xN;@-CJ`EePXd6L!woYpGup3;S{^X}J!ZW@8zkj($6uU@@)afw} z34Kif12g&66S7%yU*+FDL*2`9X-Ypz3gMEHH=S&-P{G;$|CB|0UnRmvCo_U_c z!WJtI?mvR0_P^Xty-O+su=2iW)BSaPc}D;&=x&UP8Gss60-hhzou{K)mzhuuK;?tJ zA$@|4U~7^)5euK=wfqD>8()$t|73Z_)BajB=hsW8IP5=$P`7I(j|lD3Rn0fv9(^O~ zYFcVA`HRJD3{MBqMS}2U)#p@G@zwJV*azftZHT&_)xl&!G|xDk?aw->vhGKpnRESa zMG;me(3}|-6_w}j8Oo{e^P+ty=LNV^$@Q*)9_=N2vk7DwW6Yp{7`+5zQgCJ;a zB}Cm^Wro*7#bKD$tweyr;4sRC4BuIOohoBW3FGf>y+apFihCy*Y#1&w%A^o(crGJZ z!eM~q=y_5DM$K6=^W+}FykoSjLM<5Vuu$?xbv;(xE~&(PET#}TW)HS-AI`>Sn}?*f3w zg4Flrvwe=c1h%BluGDa&Jc-=vr>z;BI~qUoh{oV)Xz^K#$mu+x*2dh}aRePRLsPN#p}IeeB{`XKnzZMh|_4jLMfO^CcZuMZ2NM1~7)ws4-dSOiWj-j9qF`;G@genlp0Zl|lh5N!ZAR3svrp z+gGhVj~VNlcqMuq1>jPV=5DKw}WWt zZU;L`7L$DjI^vnyqKrq&-)DVz^OsUM^aTZyE5*d4CZfIDh4allp~Dzv?6{50X9L6B0+M!X$a(Qr=a_-SHF~~@gVF&be1gQ*J1jw?=F0Wq?m8Yyi9iI{!eXm(^!wio z%mRox7|+y#&@;^tavf4HoR@Z$^S1tna$Q3n%fkD@K;dZN>()M*CGcf)+`e7f{C+U0 zhpzEXX5ME1fl{fRS_TB|x%bO?t+Af~W%|A@?dR&j^`2Z2Ld`*W0s&5Zsz_IhM}3DO zFuK>Z_ZLaG?XjMf@1|8p7tK-JGNx{LRtP3)S7s`#%S%xZ^6U-H4dAf*X` z>>0bhhOy0`_it7=g0wk<?aG{LQ&JVEq6_VDF-X&z+=+Ek zvnWRu%JFCsx*(ElTR$0T1vg9%T4Tm;(C0=atr^pOfYzQ6%)Diqd`aF}w!s6^f4n7s zX|YjajW-UPM8=+k?7J^*EbHXyNmVfPv@sDh8(E_OK_$E!CSB*rIn^qxf=AxZJ>n>ASMPsHJ$)?6vX|4in~?ZEBvd~rX9+_47MBkg&a(fVF03{_2e^*Dbr450(99?nd6fvB_FNy~e?|&qh z=vR7;_FskbD1W8uIr6;>i;ee+3XEOZmVRMa$Tlu73QE}b5+ru*6|3*NuY>fF*h^nR zIOTe;tcU7uFdRtupPz#I=c!6MlpFP>xmpIJ5;w|sZi*#9Ds!}LiX(!k6+7W?oO}F@ zq1?jmzXVQ*p0Ke{sExD|-?QHREq$v^mJ@Ysm$G}Xx7@Nh&&D?+;Ex~gh^A#oj>|+l zO64=gKc`e~8P9AZoAfi4vaz&lNg7>yPTXIoTvqpWseoJN(qn`#^#Pv>u|)*Wl6eC2 zWuxI!bCrLZSFE`urlu^}Az>v=)=}SVb=51$a&55uFEQL9km0Eu(`ifLfJErWu)nRq z{?w$~!IOq6_oi84&AI{!)>HnI81D}cCyI%4keWsqPKAEe!zt3b$4WgE7_S(oGot_Mo z-*j 
zx>6AM)`l5)?bLjva!byYbhajJqJG3@d+$+`9@*%@uFYl%t8PRu@XG*lP`zRizgvI( z6Wly{uesVej^A`@L770@L;hiio~cq*q1{Ud3ibs%fYoRg<^kU>r^?>7GCQv0NLGhW z-@8L%9fFRclDR>R_Z%d-f`)r_9xqtNJ^U6umn3(st8q7<)>g_DX7ir1yx&M%v zl5#7!qQ|)Mb3O~4(+b74DPnr*TFMf#HMkR*t76)5=lM+Y6KJhRR<(=Sh-k-sF4LJs zf6ulR$ivi`=2j!&y8+cMruc+v(9IWT6Yl+f>0e^zF01Yj1qPhFa7OHYj^ixcm?U}B zkK)W5l*(0DU9{_dHcaeouOGBuw%cS>YAld^6OIK1U4NFpN1v8#b}_2;BLGl?}B-5|!8QzoNK# z1S4rQI|(AaY7vyBXlMP;*&Lm*r?|{vL=d_}aZH)O;`^l{H#cOA!cONn(kz}vY#mfr z{^-g*h&Ff_EgtxX0}399ckj;wXQI!>9umOKUk8mUR+j~6Vh*>bQsHry!d&TW1sK>t zSihf9+`oUS@;p5HlTPHqQU}M_);;>TPKwpw%;&~Gd1Cr2tL+{Ci>Q=;vTOUNiQ>=l z=>PQX@po%2n_p|Ko~-rJRmK?+8nVhVvUtJmH=jfn-Fn|dwU7X>V$~^h`r5n@hy;jX zp>xRHS>GxJ=4n=ptX-5LcW3e(Uq(%yRX8KFJ~E=K6+0`c*&5qeAge`0HnYo7t8h_Y z)}I(72ijX~2-W$;=O({8X2{GRO~~yVJI4&=c>N-mRi*ZN`PjeJY(33mrQo0(B0eNh zTV}n;A)h9s;Cfj82O?(SI?e;t3j{e{z&9RZCtFt#OkvAvi?8wC${ze`T3t>oBu;9% zZih%W>lFJb1F})Z8Qi3;U%WZ)5m9I$t{Q~oUci^x?6n&B7cW*U{>v4k7Y#Zk#D&`{LcHoh(m^C z1?Jz21$TeJquvnWzx4~h@`?@@X~?-$GPM83#Vg%csB5h`ojT`xe5ASuc-I)4%u%%L zr=+1ZliS|YyO0$#BJIG6^4V*Y!DutpZ`O-9dj8|y@C?pQ2CJ2YehbU6C*D3RE)43^G*~P;M^o8PKCKE zE`{EDi_zRhMAJMYddFHjo>pllw&@$k!#ujY&Y-%6Tc&)FcecuOwoxS(BoaKKJ_h48 z_7LV|T2~md)_F%DeQ&9ev>}HhEFrhQqW}-kl}J^=Y;yGtfkwe#CUNQlVo56(z@nJ> zg(50Tq_ygnRSrNtreY`4`1KRDYa;iiIl_XrXvcG=o9^qg_f8x4UN>bgzyZ>Out^lX z)o3QwIzTf<=5|*N(1%21ScqU^)jsw$hGfvDnJEz>o`xYSnr8`=r{)HjL6 zV;kTz7PoVvmEt;bX_9D7jiTswo27Qji;md<*-tcaJ|5&aTfrv}GA^4}3DH4AM?%&L z(<3vWpd^QzsdNB+DmsZ`SLHl1^KPHVl#fk;vgXE`%n6EFZBZ$eV_6KcR<{x_xzMWk zc$1>Ik&Mx5k7H(scOPwZ8%*n1+a7VNEh>UUJiB(yujAVh%Lq;SKQLp+U3A7bp_Vl# z!x$!j9HjZEU|Tftl*f_N>|7RFHRfa4;{O`8C02+*ZJ-8^{%8l*zf_j&0DsHNb7!w1 z{Sl3mq7Ew>sD>k5bfkF zcT7YN+I$qi6+l3>Kz&mTrm{oAKhe4qJxfkO0Ucv_$J1dWx~x>T&i;fCKGaTTi2s1G zdvZLz_Is3fJ)t@VV{>SudPcs&c9E}G`0bg#F6)6eEcuSuSVOc5%Jf$wRzs?=;hw{^ zDv>ESpJH+rSl29;W|`>Sd)3CTW-Ua9tp%So%n}ImKGWYGcWU;B23@_+S>J5!G4kLcAYM=ct_w4YiofmMV zj7s>Mx$F@)^aGj5UtW@6zZYt=$?Y6LRIFr{?}cLxfWN~G$Nm~r-R%|cZ+f60SOk82 z??%j_@~$3IcXyiR&kzZ`)L`K^?=L3S4V8M(;!GQufX!<>n?E z2!#I{g^V75Q_$j4Eror!AqR@>i^2taH4@w4hs{Vyf+GhBK zo`ijn1rmP({B3xWYqRBpt?C041 zSfZjHS)kBAGB^ajit)-YSQU?w%Eq_vdR0E$y3wvVD2Ob6Qa!5|;`p-oXtN+#o1%EP z->9y&t53#A7`ut-kT`mT2ZS3$3xEN;th`(N@4nnWxmMl|(F?Fsf zHrpAmAOK)lUf8y`_E4X;akJ4S_juZ-bdG{qUdXb==EXpy2<=h$ z;T0)7HcoL(`o7tqE9sCRIo zBMS#Tpc5#TjH@ino?hUh!wD}aVtR0A15?*YhO+kIj@{+r8pdd|zHXMj^j2D>cSNKC z9)uOK6yiNA!xk0|Qv&(yEXUX>r|m9`>f+#~-zYn*SIkdfTHH4XMoqw|XJ6+ zMzNn@kz$id2vr68##Ii}-KSr>ijFzbamlFm{JX!Ob9G!37SCmeh0MAIV3b84BTOVM z(3NR~j*fhea}=U+Y#ih;*oX{Q{j#@Fk`2F=O2**}yFs^G!45=FQBATB0YrBnQ-NXR z(}GF?ON|gt?fYTxd>}$a9~BMsm#7+zvzXF7yAI=Tgd1pN+c3SZ=13On&}#~0giH(& z1!@XSxMgMcW%3VXBv?%^;6?*Mx=bH7|gmD{0 zfBtec34;1&uMH9bEQku1{AvkJ~>mz@uP?MO%V2bZyagtU3k!CKCzo z-}Fv1+s{&(JJ9VIVo=eINiPuLTMgj{=wq3e@jagfM(fbH-aVpV*A~T%CN<^$=KJL` z+7}UKMQei)FBK+q2A=mmo}Ge^Lih<-pprSB`o__~=}Xh&)%D`SgaLeKV=uwdO{<~G zxLdcrls4JAGdbL*=h1e171wer-=kybnDX7!;<-P5A~t~`)d39-d!JQAa*Eh>GlaK$&dSQ8Xm&9qE*Yvs-)c!k zQD27|8=Hy`@+ux~zPvNTb zR&47ByA3+tv+sS;aP*LgZ~X}<|9k^$fS|MQGI~hqZA#(3;zNg$_PcQli%Oq>^3INi z&E#P!K6U#_LpuU(Me`F-a*H18!%qOkt%20qT;H_h#^ytL19Zso#v4#vIAc{~Prfp4 z&#`P*C4rHn;#&qE{k}T|&QG*7={8(CrjL1{KLIa%p%mA1Un1f1 z`g9d!0{S7n#CmezPfETxs3(5M35UI7t$WEi?4yGIDUUaH5)CfJCXvG|w*0W zebf6~*$VAY&2-M^@9N`W*Lhr}lz-ec-J{Y|vVaIKBs!vyvj{E^;_JkyP3a{;@ulIQ zSt|zK%s47%3t>Ky9tmuqA3>sJ+{A6;x1NUTOH`YrdbC?cMn<1_4F+>ZG~(R371$3h z9pfzYQCijULpW}>PQRf}xxZ_4Bl9QVq0K@#qLKCC!lo4U`@_N->CJmeHX*g=p|}&m z@BCnw=I1qULmO&k{C{`Z>;Qhd9}Tod5!?>?B%0N#+3im5nG^b!bwL#Ozls0rXFv&* zgc1xhdF{l)&|`@yqiQ5$DzMb=p4Ri@ytL0(xi)VbO?{{>pS_{i&{1ml-|YWBlmEjP 
zqOPMLRr=ZlP8SX)ATxjCW$9r-z`%FSfY-blT@F9kw-SyY8tiDG-$C)EAEi?P;64)> zbNQ3ow6Gmy4>kc5hWw%7yVKJHCXj$xc0IO?hp9#&2g0+0XRPA!O=4$*efxHLf;U2r zH_^-N0=Y2sGW}nd;P>d1-)T+@67P~V%s8wu(%0<9@%KWdI2s|xiT~Y;WRwF^z}49G z24f1V#!CW)*B$b#8d*`%0pS{1A{>jhLpj&@(UdF^uJ48Di-7IWY3jENxIcgZ`s5PL zB^v4qsH3X~rc!@6XSnl@kVChS!y)rInuqKEj!FtxNFw48R4Am5%dN+4^8M-Uk8fJs v=l^txSaP4w9DlacsBwSO=s$B=b>t=cDIt=i_JTWsBnH=f~~n=Im{4>0!(5?q&b=SezUMg%(9cUPj+P z=P1`O$rzEf{vHW?N8tczTXb>L&|Q(&fhdfUt7I@4TC-h#win-tVX07MhmG1)w5zl0 zWvP_@O)6*PsnBO92v9NLOKnz?QzWVf_TvU5635(y1u*c_QJ0&&8n!33N6_wH)!`jIpk40vwn`sgL=kMlEO zpmIgw5DXhES((KPTu2#RN5tu%*taeC$I6$Qxr%RC=I=2PXvymlbr+=9f^`NmjF>W7 zAMA)UbL1z@jRUnTd1#Y&#=3jezer(Wu~L89)8qBW@(Qv%errKYCx9~tw>U-z5n)G< z6E|3|_dLx#DGDy3%SrsjWbKPO4ZWp7oyxy?nRdpr^>D3)WiKg1CK>&ADsK3eu_vnw zMfyciajLB{uP2f1!Sp3wL`Oju;dOW_4gD_#`O4_JBhc^uN3j!XVQxJVc&DY`cbv1nE|Q_J?N_g#wmW#d?6a?9b%N; zar)fbSVWvWwIlrM9!BJuoNulw{w4KoIKstm-f23g?`PfZvOA(j`Yqd!~Yt$3Ozum+arIyr5rjh9zSZEfw{ zd?Z{fydX&P@!yP=3M0{%vKV9lX=KmF2CXcXzCo!zJ6*Vig#`*7Xu>X4pIw0v)L_K* zYY&2wVQaZ8m|d1Uw!^ED8v1E()o)z^0UaD16b`wC#1UoD^iT?vflyJ zohQYPCjT!a?BVW;9|CFEKGPCE1M&0oo3wgG0ApLi z_?;)I%8W_6zgz?>-c;cd5CHQnbJVTSCzMTsBRx%psBJQAUkeK>lSV2p=6wJwSXx+w zy{U?@B)#rT+!AJum1Rzi5fi;V$$uNISo{J$60eAkfRd4tN*m1Ky>GEKynbBb5+(Q{ zE*Xk^a8s2G_{&&dU*CS(`@Cev@zjEAc18 zjDh)TW9z0iZ=|N%T6;_pt3LZHd@9lY{(jJ&Pxb+#2Czf;mT>#OUux3EySfG!c^;q5 z?LjhA6R@!zzUnt|fR`W|3|MFo*M~!H@V6o zE(Eh`tTEBYe0L9Oo^tr@rG7_A-JZ2co*=NBR(lC#VJ59a&r@Ht1$sI;#ah)ympRTl zHNF$YI29*g+lka(>-mJK{YpkM-}tKSy=j4?Xe=S%#h+Cs|0=a%2Y&}C-22aMJIw4) z);iQRHH!{hYqWt~QBj_^4`gTLWUjTp|Cg^q?sXw|zU1-1ZX1ZiAM)hIEv}&z5*2NO zy}w4h;P709@uQ=o=Lma#bZW$DC|wWin0(IDPH6s8@J)T@NN;S+J0cz#it_n)!Y>a2y5mTFj!eRJ~5FzVfWK*AJKy{lt`19 zcy+k=k_HZk`%xj!<2p4pX68fI-iNud7L}(qyDkqlxh}skFtkTjKil!jk}dpk7_i)A zeV|{G=?*EXux~|O3ox}=T2)^F+xVPqDoo}}B_adBaSBe2Lj`0*9h+2}?Sr6ftm6zALQWzMJ(a<34;gWq24X}dD z3X0 zCYyg5ulQ+XL}hSj$kQ~_sd0387;Dby)mYsy^4uXi($)|pBTb+D&a<-Be7DZuwB0|M z7R$1TF37gedy@+`t$@_qPJZxq_F&;R;wB>tCQBrzN`BG z%=Vi6@$m3;IEhDgyh?Q06crUosi;b|SuG6bE6JOvfdn)j`0;9#n`WL7IAjP5D%lf` zHVQ1F&@i82Mr3al5A${^8W_mJpOr_vtszo z!!VZmY}j!_kn^((zEn8ME4gdo*n~2yoDqt&wfomz7D>tLkC;_Wi)sb4rweY`phCuU zRjVNG4}>KKBkPlq;%NjTQjVV9og;4vaL*fi7hz3m&)jZ#J?I zTf9DQMo&JMBuWzJk;eMqxF(q=>h)HUU9H@@j~92ap6-K#63oBI)hS)~7w$%1K>ktW zWCH3-F7WnC(Kh;mKC(^KS8}Wnm{~uMR;vH?Yp?a2;%gC(o0dM_&k=pQ!nd=O`6bL) zeMylA{InoT!VQ(4pmO;6a7?j{d+6Zgk>MXN@__n>wY=rrtvmbj07rLN)$tYVZGdMK zS!Q))N+zkCm3n&sva$UyyjJU;V&&6&lm;=nWaZNyOd5}kZd4$o;D?8tLkS@-=sM^j&TbW(fhLp@CiR-vtqYL#xoTj=btgAX*ixa8M6qo~FK*`Eb}oD+fa_@+ zN;E=`UHUaUky1pKG*bNjv=61_c%MNUw{N#-+i8Xc{Cqzy%fieFM8ExfK*2hqGR()+ z2b68;qvN%6K~uleTC=76s?VX>7=iau`?A!AjcLv)-9<1ijgJ~PcRhioC_oTasdE8R zt&-tRwm>LcY|uebvsp9X#;oOy(8BHPHtF zoem~}LuF>L4vE*$nBJ}h#?QJ3KSMyzN@6)?FFk3o(FqW*)L2DQNBT1w13NSe_Jes( zuQ%rjp_~fLse^fbn+I>?Mt-Ppnsp4@)&Flu*4U3}DT>9@3L|B7Y*?=}goF>W z#XD9}432bp^h>$0$l9jug4z;tIu!nNGlbir$$^IPRWBoAD`J-?N19xVTevY%c>Yb5 zCXfU!G>zS4Y|PGb;WJAlxYJnd&l)o(N_|2Gj4yLr)_+hgJB69b{)zjd88o#t)7-N2 z%>PZg3-hzQIG!F)mldN|3=Z&tJ)b}W)TpJ7d6|>`Yx?kLF;1L`qY|6FphMn`w@egF zryfVdyK!#g3aXccl^E5;ohQFdZo%iZJilAHWkLR&x^;1}GIyrm8BJM;V&0iWgBmlo zv^`8Ky)}#~a5V;Y_Wwa9d+eu2Hhz24=Xa}TvyMRK_`4YYnc@Upp-y>_mng+8%ryyU zOyij>502woIJ3u?7$+FwF+E?PB(7O5^wZR20#(xAxJ)9iVpV($Wd2=O8a%@aCd%TJ zsrul3qleLT&DmMh#MTm(Qd}3BCjR=hOsk03nXk?s8#tiXnUYk;Rl;HlB48* zws~zTs-~|(bSuDl$ijob!waCq4S6b;&+N8!7IOD`wT@4G6;feNEt%O9R+cQ=?k?Yn z5SKs+CEOInmz~bgtt*=Ge~9|BbytflQ(<=!C%G%sD){zAHf}6@S8GJf@7M-F65UqY zjvGHW+DH|NC$O)5mNs|inqMsEDS2n(=3MK(r3&(5H$YhT^YUI%F}HXMP`8BAf3PGC zA1v}9bvoLbg3Lo$I8?P*;1d%%+?gt+x>$6ZC?9}Qw!6y;;AaXUHvsDbVD!mU>QqyQ 
z72M|1onZ^r@lu@r;gXqFX;I0ZjjJ8VYN+=N?E_!kE9Cu<%@yS#7QMa*v_oYM`?{l# z!E@0oZ3P>CzJ%_!&wP0Z9<62ZG>!7EdHU@BxaF>QyT^gsbsF#g0<8hlR7OBp4?0}F zRHYJJ{))gz3fGg00s*zIydX}HaCc3^mB?*(M!=xWS~ZxW$Zc)a640{eYp@JQxcn}i zen(tN$BhVielFe9cwBTZ)-UybsQd$lnP%2 zYfNdqH^(4r_s%fwk-Q7yn z7VT#5b8-mR=ie_%++jLx$F=?)e@8h?+Gg!b#ecZ3OLjh9)YNR!IV&M^IA6t|&4@t=dCk2+qzrj!#={;`RJ}Rr%vdYCPNt2c_G=l3J4a z=DhGg$k;1MN=a3Z`k4Ko;G_Ode?q&+W(!h9N@YSB9UpbaB+aJ9#;gghC887+h%`Ay zPvdi3$w%Dzp>nIP3nNxT*0Fc(C28+66a>GPF1lQHh!FJ`O`26lmC*2I;YQ(8#lH8O z9Fh6Po1x}uA*?W;7Hq2&%aL_d!eeG#6H4ukG=mrntppJg3rlZ&8wdCEejjQJ} zeVXrMa6Lfqd3!aiU@m=<6cQah=d9E|mJLG>V`kLl4lQ_)RJgM^YujWkJfS00R*=Dv z75+>Q1zm;9W7x=~8B%ATtCsvN%p7d_@eT#Bd!t+WQR;H#c*@ZWTvI>2rA&15E z$I@CDZ9e{VprSs)nW`9;1uWE(1pM;%lMm{{$Tor~mI14-y3;ErF zsF0mXLklT1nbx>(RcUZZSgL3y6@LWFV3w+~VSgtVdPKgrJL!6=2{~wg`7O3dWFq?o zzz!@Ii1pPqtledh8yPy3iYh_WZI7&Z1`!dhYFHFuzZ-hLaG#1#K~YOcDF1E*;8@lx z2K3;gxHhu>xL6P*jHK)9>+=)~UN4_>8hGvSKnZ?f)%Z>6^rK*uhbajiDDULOY$a=g z9G3d~6ZlB`=}*%uv5bLXsk@mL7btt$;rd91Ex@srnU@v+UEnl#7;k&Ewn${DmyOj> z!lRzAA88VYz)@?feyq2y)tRYMQ&N8Z#7u9k9IpA$QUCqzDIYF6l}Z7b1Db4K|BQe> z+|uHS!9gkE<(G;UYW_@?7SEXeGy8c)|7W37U8#B%`rxoL&53<{9=j{VIsneeS zRu@M)kOdkFmZX;w2;-(-Ih2dPGJ`aIR#ZlEUHhR}5#2)S4Xc=ECGusuSPC1MmfyMe z?h*<{3#yLibm|IDr~ttNj${Mlfa3=4mU^ZxRmmbh*czsd+2Z!m=DO8y0D?mOVn1X; zxZTgUDHkl5tJ#b^ZxvxrR}IcjOEIceY&41=b6Gy#v={^UxdkaklPfUOO>ve;OYQ16 zTlVF;u9e^CoFL?QDC+zlvXp5*+!#?QO)KN!H9y6L-#De))i1uGii*8f6a)Laxw+w0 zY1lkzu6@@wfMRpJlJ$xAoq_!H`XK$YUd%g2K3mq_G+E+NbH-R6y(=|B5!EjjD!>00 z*-oX11AS&l|0Aox`5&#v3`>~+oKGSqD2yo}?CMZk- zAM&g$dm7d**VyyY2@8?vdNM}E;b;)R4CK(TwcOX+^V;(t<_L5|UtI;A8R8pYret?N z?;-d-YttRYe>5$&oI<%;7}9yD0x+r*L4*nEr9~e+@tQY zzgJVF>_yr@ifKXO&3X!YORj28ADdyAxaB7y#?A>Q$_niMKJu1+X1rerg;DRFM5Pxw zYuw3(e?*z)i$Y`7nE^<}NECwyW$DeWDew=Q+j8%^>vMaxzff!KX;nX``H$eo&D7jT zcA4ugTAi7Ho}c-=$b+cNUZd%96BPwG!`Oq$FoAPS7OQq&0YRWEzDfbIOdN3b_03Q% zm1R$N8d7m2my?rQX%FD{1%LkhS(`nbl$zSk&MuM{myCiU?%ZO(ZBmY4l{pbgAP$dv zm;)ZJ)w)AHrp!YQ#MlL4m#du`u_0_5Hr=YD4>xkt?*Dvs|K-%U8L_IXY8HGpARu*s zzy?r&JiSbXK7BrXXfV6$W{hS<%6b^xH^lu^=>5$nH{a@U6D3Hcg%Fk>wfs1oFyY9y z(+Ch)zNA#mvEKye(PBA3$XR2Fo@)bS*{XJUTs;UAtT6)^0rMhI;*wp#KWWo(Yr4ah zFl?1};Q?zuoj=-$EQ?GY;-s#j0myI}w#mwj!!~^5I2=CX00D%& z{}GBZ<7r5~T|Iza46Tl0iQ=S`&l+mA4H|!@_SH(THgmuH6Fb~%mC^nQ)BQxtY=Zt` z!rJvNxVS2nXHNEUm4aLAMD;Y+S~7D?aA=TESYq*Ckt=r57VFPE!jG-3B3`YO7lBl! z2!MMFC)8W@re$T3{rU5!CGdiMYisKat(g`}*kc35z3V~HxXHy5z+@(!!DMVqohST~ zlAVQY6jHal44h+D4i2oEa(j1@z(X3=Au6P>xI5qs4lAJx9z{4%TdH z6}hb=XMsElc&$6@NW%`F-vvDooizvmD*}2M<&b4yV0K|49U%1O==~q!y?O{JdHkR& z`V&wDJ5Rh17hd=BGJc&%(JTkVkTRocuiYs+q8QLcVD8!3neRK7`c}p+WFW&Rk_voX~w1~6<8<9 zSc=`lRA{nimF$l<520&8o$lALpf#A-0KDs|*v4JyeqI5DHLi6%I zM!sraa)+Y2-MfUb-lQq0hnvY@YiPv%d6x!h3}bI#O{7Ebje0#OCu>-<2Q@RQ|=Z+Wv!e zM?lRRW5wbG-k0I&hT#WEA%9LTLSoWYpDj0SwN&qt!_RO3obO*9rcW9sBs%W4`xf>$ zeO9C^KCM4ECyku!Z=dM8-V_8Yd|%3%nH#b74m&x1!VpXOSfkpKy*4TtJp)lV4YbN| zT`mJ^s@eDGN`^PVut9gQ9SrwBvIo0lgVaYsABZ)8bKGltJPwAvU z4{6SJYuoCb+x68g42Bc>Z;x_aT%0oMSz;N^m&l$(c&7CC&=}l*K7Sh+vi|AYbtHAL zjdV6x3?`cdKT&ax#WthH>Qb$?2c!Zxkvp^RDg;pFS<_VkJr9ual9iLxnE_$2%TpAxi-$L$qEdX(ox6({bHrJ-%w5Sd4VKxxA_%Q%2o@hJ^R;$8%Ue2#_4-DRE3Q72o!??_mG+ zfJyvU5Dla|H{$wEshJzQKe1EnSsQfZu^8ekUfxP5>qz{o+r`@8;!=S<-Dv)aGLkNn z+f-Hu?0d4NLE7YPt(dPVZ#ylJYs-h*YI1cdJJQ@#Nvaq$Cr=1EYE82}hec_4g|@_3}=4MN+0-uzTQM7*wHhsiDyzP|ery%#^t9NTVdhzc zLPcHK?=su&rV@Xmzo%%rT@?*c9=T*s6w@W6vK*jK?b? 
zltaIP(&NYFZ{SLxhbiIVXP%LK0taw`nKvNV_foK#4oY{NIJmOa?Fzm`${b}$}t0$+T{e%shWLmO~Cg1v*y+i!NUO zcBjSc5JVp6{x#dDJIMpv+!{OuIyL9#GnoNvIV@+}Z`+PLD10R(B&aY+vdb0`-)eJu z`ku>@e}J!>eLt;MQd*8fm|(7Z|A6NYsxhiaoteC)&)fOTQPmjlp8W&Yz(52r@co_U zBD7#%xNv*8K>m3!TDi(mui+?cW3v>y^YRe8|J1tKRCp}_mT5|Y6R{w*LWUP0=6h7c zx0+(|9da{YT!eX-Ftz>Z@D^A6tnj6kTo5+RBwSLnn z(}=2mk(`Cna0cc8DH;+QMThT81I#iq^0rV~wBQb&ZtTT-))I=7J|qr;oUZ2PK;WPFLiby?SDM)p)zUHV@`Jd=NKr1o4x}hsz?{kiVvFfJW|5So;Tui^MbYbf;~E{AzH_vO?k%NJ z?!3U;05kSoWitAmU%93hwcq)v1kOxkteJ=l}9ZE zSuB2*%?%7_-7DC~Fx(pe{;Tw0r;hv~x7_C4xFvSKXkiiSJ;8&HBHlu_wh9@}lTT1& zxMRNyK}KEcWS!y?NA`Y4#&v7$5~GWksJN__4MdxDG9+AMy*Va+q+bE7Mlx> zxP3Ft)BTC9SC*U=KbB`bONcoQYH1h#lQp|O}c%e0m&`?QkG4DUP<(j#%GfZghXM+^&cgH_HS%F4h zj+kFU^Lpr4-HO?PU+xGPAao)Rb-@vK^&@*T%U-$fv2D7tcPsD&WJMc43 zC#v<@my=>Ar}M_{16LXv8fS6l_w?LxKa8B#AZ&Tm7S^Yi1feYf3l~|7NuN=R9V4_= zAIHSF4dqVV^Lel1v?TZx#X>gJf{tBNrLrB7gkqAFf8WFXO}>Q(BcmsM&yJf-}nibcQ4K@Yc?Cx_d5Wi)gM>tf0 zzCP3WDd^2zllnErv?hGwmPp9?XP~F~;zi)G@fph4R;P|v10%$U_X_AJ?_6nhD)|@4 zx{4nMu_*ap%)PZ%`^AtbUT5{@Bj!6YfTUQvxP0fBvefhWJ6)`_r6GM{yEFu62yC`& z-`cp1m)+J~0s5;ce=*-ltspcL+BFD%B=1gaK@-oUPnY~zyPq(v;UMQym4iFrvmX={ M1x Date: Mon, 29 Aug 2022 11:15:06 +0200 Subject: [PATCH 068/232] There was an error in the docs around continual learning and thread count (#7314) * Error in the docs --- docs/freqai.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index eb76850b6..140e8acf9 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -129,8 +129,8 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int. | `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. -| `continual_learning` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int. -| `thread_count` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool. +| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool. +| `thread_count` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | | **Extraneous parameters** | `keras` | If your model makes use of keras (typical of Tensorflow based prediction models), activate this flag so that the model save/loading follows keras standards. Default value `false`
**Datatype:** boolean. From 2493e0c8a53447c552601a645f18ea0f5172b5b7 Mon Sep 17 00:00:00 2001 From: Richard Jozsa <38407205+richardjozsa@users.noreply.github.com> Date: Wed, 31 Aug 2022 16:37:02 +0200 Subject: [PATCH 069/232] Unnecessary lines in Base4, and changes for box space, to fit better for our needs (#7324) --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 4 ---- freqtrade/freqai/RL/BaseEnvironment.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index ef5b1c107..d2b92a954 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -66,10 +66,6 @@ class Base4ActionRLEnv(BaseEnvironment): self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None - elif action == Actions.Exit.value: - self._position = Positions.Neutral - trade_type = "neutral" - self._last_trade_tick = None else: print("case not defined") diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index bba3c4a1b..bb43f5300 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -57,7 +57,7 @@ class BaseEnvironment(gym.Env): self.shape = (window_size, self.signal_features.shape[1] + 3) self.set_action_space() self.observation_space = spaces.Box( - low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32) + low=-1, high=1, shape=self.shape, dtype=np.float32) # episode self._start_tick: int = self.window_size From 240b52953355602f096230c7063916905745edce Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 31 Aug 2022 16:50:39 +0200 Subject: [PATCH 070/232] fix tensorboard path so that users can track all historical models --- config_examples/config_freqai-rl.example.json | 4 ++-- freqtrade/freqai/prediction_models/ReinforcementLearner.py | 4 ++-- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index dc7c62e4a..0ba71cdca 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -72,8 +72,8 @@ "5m", "30m" ], - "indicator_max_period_candles": 10, - "indicator_periods_candles": [5] + "indicator_max_period_candles": 20, + "indicator_periods_candles": [14] }, "data_split_parameters": { "test_size": 0.5, diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 0e156d28e..2e359d924 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -28,14 +28,14 @@ class ReinforcementLearner(BaseReinforcementLearningModel): if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.data_path / "tensorboard"), + tensorboard_log=Path( + dk.full_path / "tensorboard" / dk.pair.split('/')[0]), **self.freqai_info['model_training_parameters'] ) else: logger.info('Continual training activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] - model.tensorboard_log = Path(dk.data_path / "tensorboard") model.set_env(self.train_env) model.learn( diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py 
b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 9f6a66729..e74423a98 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -31,14 +31,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path(dk.full_path / "tensorboard"), + tensorboard_log=Path( + dk.full_path / "tensorboard" / dk.pair.split('/')[0]), **self.freqai_info['model_training_parameters'] ) else: logger.info('Continual learning activated - starting training from previously ' 'trained agent.') model = self.dd.model_dictionary[dk.pair] - model.tensorboard_log = Path(dk.data_path / "tensorboard") model.set_env(self.train_env) model.learn( From 27dce20b294e2388804992882dece3e33d4a4fa7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Sep 2022 11:21:54 +0200 Subject: [PATCH 071/232] fix bug in Base4ActionRLEnv, improve example strats --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 2 +- ...c.py => ReinforcementLearningExample4ac.py} | 18 +++++++----------- .../ReinforcementLearningExample5ac.py | 2 +- 3 files changed, 9 insertions(+), 13 deletions(-) rename freqtrade/freqai/example_strats/{ReinforcementLearningExample3ac.py => ReinforcementLearningExample4ac.py} (92%) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index d2b92a954..70a625136 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -31,7 +31,7 @@ class Base4ActionRLEnv(BaseEnvironment): if self._current_tick == self._end_tick: self._done = True - self.update_portfolio_log_returns(action) + self._update_unrealized_total_profit() self._update_profit(action) step_reward = self.calculate_reward(action) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py similarity index 92% rename from freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py rename to freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py index ec0977455..d9932eea7 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample3ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py @@ -11,7 +11,7 @@ from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_ logger = logging.getLogger(__name__) -class ReinforcementLearningExample3ac(IStrategy): +class ReinforcementLearningExample4ac(IStrategy): """ Test strategy - used for testing freqAI functionalities. DO not use in production. @@ -106,8 +106,8 @@ class ReinforcementLearningExample3ac(IStrategy): # For RL, this is not a target, it is simply a filler until actions come out # of the model. 
- # for Base3ActionEnv, 2 is netural (hold) - df["&-action"] = 2 + # for Base4ActionEnv, 0 is netural (hold) + df["&-action"] = 0 return df @@ -119,14 +119,14 @@ class ReinforcementLearningExample3ac(IStrategy): def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] if enter_long_conditions: df.loc[ reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] ] = (1, "long") - enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] if enter_short_conditions: df.loc[ @@ -136,12 +136,8 @@ class ReinforcementLearningExample3ac(IStrategy): return df def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] if exit_long_conditions: - df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 - - exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 1] - if exit_short_conditions: - df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit"] = 1 return df diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py index 15a263b94..2118e1221 100644 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py @@ -107,7 +107,7 @@ class ReinforcementLearningExample5ac(IStrategy): # For RL, there are no direct targets to set. This is filler (neutral) # until the agent sends an action. - df["&-action"] = 2 + df["&-action"] = 0 return df From 48140bff91e9d0c5ae20518ed8dd2963b89506d5 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 14 Sep 2022 22:53:53 +0200 Subject: [PATCH 072/232] fix bug in 4ActRLEnv --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 1 - 1 file changed, 1 deletion(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 70a625136..bd5785b85 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -33,7 +33,6 @@ class Base4ActionRLEnv(BaseEnvironment): self._update_unrealized_total_profit() - self._update_profit(action) step_reward = self.calculate_reward(action) self.total_reward += step_reward From 8aac644009dd7a8ab8f006594b547abddad5aca9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Sep 2022 00:46:35 +0200 Subject: [PATCH 073/232] add tests. add guardrails. 
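A short aside on the action encoding touched by the example-strategy commits above: the integer written to the `&-action` column is only meaningful relative to the action enum of the environment in use. The sketch below spells out the mapping implied by the 4-action example strategy; the real `Actions` enum lives in `Base4ActionRLEnv.py` and is not shown in this patch, so treat these names and values as an inference from the strategy conditions rather than the definitive definition.

```python
from enum import Enum


class Actions4(Enum):
    """Assumed 4-action mapping, inferred from the example strategy above."""
    Neutral = 0       # hold; also the neutral filler written to "&-action" before prediction
    Exit = 1          # close the open position (checked in populate_exit_trend)
    Long_enter = 2    # open a long (df["&-action"] == 2 in populate_entry_trend)
    Short_enter = 3   # open a short (df["&-action"] == 3 in populate_entry_trend)
```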
--- config_examples/config_freqai-rl.example.json | 2 +- .../RL/BaseReinforcementLearningModel.py | 41 +++++++++++++------ freqtrade/freqai/freqai_interface.py | 3 +- .../prediction_models/ReinforcementLearner.py | 12 +++--- .../ReinforcementLearner_multiproc.py | 4 +- tests/freqai/conftest.py | 7 ++-- tests/freqai/test_freqai_datadrawer.py | 2 +- tests/freqai/test_freqai_datakitchen.py | 10 ++--- tests/freqai/test_freqai_interface.py | 40 +++++++++++++++--- 9 files changed, 84 insertions(+), 37 deletions(-) diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json index 0ba71cdca..9dfea932d 100644 --- a/config_examples/config_freqai-rl.example.json +++ b/config_examples/config_freqai-rl.example.json @@ -62,6 +62,7 @@ "train_period_days": 5, "backtest_period_days": 2, "identifier": "unique-id", + "continual_learning": false, "data_kitchen_thread_count": 2, "feature_parameters": { "include_corr_pairlist": [ @@ -91,7 +92,6 @@ "max_trade_duration_candles": 300, "model_type": "PPO", "policy_type": "MlpPolicy", - "continual_learning": false, "max_training_drawdown_pct": 0.5, "model_reward_parameters": { "rr": 1, diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 77db9c655..f822208f8 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -21,7 +21,7 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions from freqtrade.persistence import Trade - +import pytest logger = logging.getLogger(__name__) @@ -45,7 +45,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] - self.continual_learning = self.rl_config.get('continual_learning', False) + self.continual_learning = self.freqai_info.get('continual_learning', False) if self.model_type in SB3_MODELS: import_str = 'stable_baselines3' elif self.model_type in SB3_CONTRIB_MODELS: @@ -59,14 +59,30 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.model_type]) self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] + self.unset_outlier_removal() + + def unset_outlier_removal(self): + """ + If user has activated any function that may remove training points, this + function will set them to false and warn them + """ + if self.ft_params.get('use_SVM_to_remove_outliers', False): + self.ft_params.update({'use_SVM_to_remove_outliers': False}) + logger.warning('User tried to use SVM with RL. Deactivating SVM.') + if self.ft_params.get('use_DBSCAN_to_remove_outliers', False): + self.ft_params.update({'use_SVM_to_remove_outliers': False}) + logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.') + if self.freqai_info['data_split_parameters'].get('shuffle', False): + self.freqai_info['data_split_parameters'].update('shuffle', False) + logger.warning('User tried to shuffle training data. Setting shuffle to False') def train( - self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen + self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs ) -> Any: """ Filter the training data and train a model to it. 
Train makes heavy use of the datakitchen for storing, saving, loading, and analyzing the data. - :param unfiltered_dataframe: Full dataframe for the current training period + :param unfiltered_df: Full dataframe for the current training period :param metadata: pair metadata from strategy. :returns: :model: Trained model which can be used to inference (self.predict) @@ -75,7 +91,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): logger.info("--------------------Starting training " f"{pair} --------------------") features_filtered, labels_filtered = dk.filter_features( - unfiltered_dataframe, + unfiltered_df, dk.training_features_list, dk.label_list, training_filter=True, @@ -99,7 +115,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk) - model = self.fit_rl(data_dictionary, dk) + model = self.fit(data_dictionary, dk) logger.info(f"--------------------done training {pair}--------------------") @@ -124,7 +140,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): best_model_save_path=str(dk.data_path)) @abstractmethod - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): """ Agent customizations and abstract Reinforcement Learning customizations go in here. Abstract method, so this function must be overridden by @@ -142,6 +158,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) # FIXME: get_rate and trade_udration shouldn't work with backtesting, # we need to use candle dates and prices to compute that. + pytest.set_trace() current_value = self.strategy.dp._exchange.get_rate( pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate @@ -162,7 +179,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return market_side, current_profit, int(trade_duration) def predict( - self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False + self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: """ Filter the prediction features data and predict with it. @@ -173,9 +190,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): data (NaNs) or felt uncertain about data (PCA and DI index) """ - dk.find_features(unfiltered_dataframe) + dk.find_features(unfiltered_df) filtered_dataframe, _ = dk.filter_features( - unfiltered_dataframe, dk.training_features_list, training_filter=False + unfiltered_df, dk.training_features_list, training_filter=False ) filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) dk.data_dictionary["prediction_features"] = filtered_dataframe @@ -305,8 +322,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor # all the other existing fit() functions to include dk argument. For now we instantiate and # leave it. 
- def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: - return + # def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: + # return def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 3b10933dd..7b35cd918 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -553,7 +553,8 @@ class IFreqaiModel(ABC): # find the features indicated by strategy and store in datakitchen dk.find_features(unfiltered_dataframe) - + # import pytest + # pytest.set_trace() model = self.train(unfiltered_dataframe, pair, dk) self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2e359d924..2e5c9f97b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -18,13 +18,13 @@ class ReinforcementLearner(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. """ - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[512, 512, 256]) + net_arch=[128, 128]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, @@ -69,8 +69,8 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor = 100 # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: + if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral): return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: @@ -85,8 +85,8 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and \ - action == Actions.Neutral.value: + if (self._position in (Positions.Short, Positions.Long) and + action == Actions.Neutral.value): return -1 * trade_duration / max_trade_duration # close long diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index e74423a98..c14511921 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -20,14 +20,14 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): User created Reinforcement Learning Model prediction model. 
""" - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) + net_arch=[128, 128]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 2c6210a0e..026b45afc 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -29,15 +29,16 @@ def freqai_conf(default_conf, tmpdir): "enabled": True, "startup_candles": 10000, "purge_old_models": True, - "train_period_days": 5, + "train_period_days": 2, "backtest_period_days": 2, "live_retrain_hours": 0, "expiration_hours": 1, "identifier": "uniqe-id100", "live_trained_timestamp": 0, + "data_kitchen_thread_count": 2, "feature_parameters": { "include_timeframes": ["5m"], - "include_corr_pairlist": ["ADA/BTC", "DASH/BTC"], + "include_corr_pairlist": ["ADA/BTC"], "label_period_candles": 20, "include_shifted_candles": 1, "DI_threshold": 0.9, @@ -47,7 +48,7 @@ def freqai_conf(default_conf, tmpdir): "stratify_training_data": 0, "indicator_periods_candles": [10], }, - "data_split_parameters": {"test_size": 0.33, "random_state": 1}, + "data_split_parameters": {"test_size": 0.33, "shuffle": False}, "model_training_parameters": {"n_estimators": 100}, }, "config_files": [Path('config_examples', 'config_freqai.example.json')] diff --git a/tests/freqai/test_freqai_datadrawer.py b/tests/freqai/test_freqai_datadrawer.py index a6df60e61..1d1c44a1e 100644 --- a/tests/freqai/test_freqai_datadrawer.py +++ b/tests/freqai/test_freqai_datadrawer.py @@ -90,5 +90,5 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf): df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, 'LTC/BTC') - assert len(df.columns) == 45 + assert len(df.columns) == 33 shutil.rmtree(Path(freqai.dk.full_path)) diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index a9e7eac51..74e8cc42f 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -72,7 +72,7 @@ def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog): # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1}) freqai.dk.use_DBSCAN_to_remove_outliers(predict=False) assert log_has_re( - "DBSCAN found eps of 2.36.", + "DBSCAN found eps of 1.75.", caplog, ) @@ -81,7 +81,7 @@ def test_compute_distances(mocker, freqai_conf): freqai = make_data_dictionary(mocker, freqai_conf) freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 1}) avg_mean_dist = freqai.dk.compute_distances() - assert round(avg_mean_dist, 2) == 2.54 + assert round(avg_mean_dist, 2) == 1.99 def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, caplog): @@ -89,7 +89,7 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1}) freqai.dk.use_SVM_to_remove_outliers(predict=False) assert log_has_re( - "SVM detected 8.09%", + "SVM detected 7.36%", caplog, ) @@ -128,7 +128,7 @@ def test_normalize_data(mocker, freqai_conf): freqai = make_data_dictionary(mocker, 
freqai_conf) data_dict = freqai.dk.data_dictionary freqai.dk.normalize_data(data_dict) - assert len(freqai.dk.data) == 56 + assert len(freqai.dk.data) == 32 def test_filter_features(mocker, freqai_conf): @@ -142,7 +142,7 @@ def test_filter_features(mocker, freqai_conf): training_filter=True, ) - assert len(filtered_df.columns) == 26 + assert len(filtered_df.columns) == 14 def test_make_train_test_datasets(mocker, freqai_conf): diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 2a7cfeb73..ac2d5446d 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -21,15 +21,40 @@ def is_arm() -> bool: 'LightGBMRegressor', 'XGBoostRegressor', 'CatboostRegressor', + 'ReinforcementLearner', + 'ReinforcementLearner_multiproc' ]) -def test_extract_data_and_train_model_Regressors(mocker, freqai_conf, model): +def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") + model_save_ext = 'joblib' freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_test_strat"}) + if 'ReinforcementLearner' in model: + model_save_ext = 'zip' + freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf["freqai"].update({"model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "verbose": 1 + }}) + freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) + freqai_conf["freqai"]["rl_config"] = { + "train_cycles": 1, + "thread_count": 2, + "max_trade_duration_candles": 300, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "max_training_drawdown_pct": 0.5, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02, + "win_reward_factor": 2 + }} + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -42,16 +67,19 @@ def test_extract_data_and_train_model_Regressors(mocker, freqai_conf, model): freqai.dd.pair_dict = MagicMock() - data_load_timerange = TimeRange.parse_timerange("20180110-20180130") - new_timerange = TimeRange.parse_timerange("20180120-20180130") + data_load_timerange = TimeRange.parse_timerange("20180125-20180130") + new_timerange = TimeRange.parse_timerange("20180127-20180130") freqai.extract_data_and_train_model( new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) - assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file() + assert Path(freqai.dk.data_path / + f"{freqai.dk.model_filename}_model.{model_save_ext}").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() - assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").is_file() + # if 'ReinforcementLearner' not in model: + # assert Path(freqai.dk.data_path / + # f"{freqai.dk.model_filename}_svm_model.joblib").is_file() shutil.rmtree(Path(freqai.dk.full_path)) @@ -91,7 +119,7 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model): assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() assert Path(freqai.dk.data_path / 
f"{freqai.dk.model_filename}_svm_model.joblib").is_file() - assert len(freqai.dk.data['training_features_list']) == 26 + assert len(freqai.dk.data['training_features_list']) == 14 shutil.rmtree(Path(freqai.dk.full_path)) From 3b97b3d5c8158905e81fddb2ab36306bccf07e70 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Sep 2022 00:56:51 +0200 Subject: [PATCH 074/232] fix mypy error for strategy --- .../RL/BaseReinforcementLearningModel.py | 9 +- freqtrade/freqai/freqai_interface.py | 2 + freqtrade/strategy/interface.py | 1 - tests/strategy/strats/freqai_rl_test_strat.py | 139 ++++++++++++++++++ 4 files changed, 146 insertions(+), 5 deletions(-) create mode 100644 tests/strategy/strats/freqai_rl_test_strat.py diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index f822208f8..a583fc9cd 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -155,12 +155,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): trade_duration = 0 for trade in open_trades: if trade.pair == pair: - # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be) # FIXME: get_rate and trade_udration shouldn't work with backtesting, # we need to use candle dates and prices to compute that. - pytest.set_trace() - current_value = self.strategy.dp._exchange.get_rate( - pair, refresh=False, side="exit", is_short=trade.is_short) + if self.strategy.dp._exchange is None: + logger.error('No exchange available.') + else: + current_value = self.strategy.dp._exchange.get_rate( + pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() trade_duration = int((now - trade.open_date.timestamp()) / self.base_tf_seconds) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 7b35cd918..7550f1884 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -92,6 +92,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() + self.strategy: IStrategy = None def __getstate__(self): """ @@ -119,6 +120,7 @@ class IFreqaiModel(ABC): self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) self.dd.set_pair_dict_info(metadata) + self.strategy = strategy if self.live: self.inference_timer('start') diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 03ca4af70..9401ebebe 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -160,7 +160,6 @@ class IStrategy(ABC, HyperStrategyMixin): "already on disk." ) download_all_data_for_training(self.dp, self.config) - self.freqai.strategy = self else: # Gracious failures if freqAI is disabled but "start" is called. class DummyClass(): diff --git a/tests/strategy/strats/freqai_rl_test_strat.py b/tests/strategy/strats/freqai_rl_test_strat.py new file mode 100644 index 000000000..7b36dc6be --- /dev/null +++ b/tests/strategy/strats/freqai_rl_test_strat.py @@ -0,0 +1,139 @@ +import logging +from functools import reduce + +import pandas as pd +import talib.abstract as ta +from pandas import DataFrame + +from freqtrade.strategy import IStrategy, merge_informative_pair + + +logger = logging.getLogger(__name__) + + +class freqai_rl_test_strat(IStrategy): + """ + Test strategy - used for testing freqAI functionalities. + DO not use in production. 
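+    The model writes its chosen integer action into the `&-action` column; the entry and exit rules below are driven by that column.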
+ """ + + minimal_roi = {"0": 0.1, "240": -1} + + plot_config = { + "main_plot": {}, + "subplots": { + "prediction": {"prediction": {"color": "blue"}}, + "target_roi": { + "target_roi": {"color": "brown"}, + }, + "do_predict": { + "do_predict": {"color": "brown"}, + }, + }, + } + + process_only_new_candles = True + stoploss = -0.05 + use_exit_signal = True + startup_candle_count: int = 30 + can_short = False + + def informative_pairs(self): + whitelist_pairs = self.dp.current_whitelist() + corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] + informative_pairs = [] + for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: + for pair in whitelist_pairs: + informative_pairs.append((pair, tf)) + for pair in corr_pairs: + if pair in whitelist_pairs: + continue # avoid duplication + informative_pairs.append((pair, tf)) + return informative_pairs + + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + # FIXME: add these outside the user strategy? + # The following columns are necessary for RL models. + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 + df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 + + # For RL, there are no direct targets to set. This is filler (neutral) + # until the agent sends an action. 
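+            # Assumed mapping for the default 5-action environment:
+            # 0 = neutral, 1 = enter long, 2 = exit long, 3 = enter short, 4 = exit short.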
+ df["&-action"] = 0 + + return df + + def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + + dataframe = self.freqai.start(dataframe, metadata, self) + + return dataframe + + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df From 025b98decd7ca0d5ca0713d9b989bf37e941e7ab Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Sep 2022 01:01:33 +0200 Subject: [PATCH 075/232] bring back doc sentence --- docs/freqai.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai.md b/docs/freqai.md index f9e7546c3..86cf6558d 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -123,7 +123,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `test_size` | Fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. | `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`.
**Datatype:** Boolean. | | **Model training parameters** -| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model.
**Datatype:** Dictionary. +| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, such as `PPO` from stable_baselines3, this dictionary can contain any parameter from that model.
**Datatype:** Dictionary. | `n_estimators` | The number of boosted trees to fit in regression.
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. From d056d766ed02ed15ab206f8f5a4fcc20a465c847 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 17 Sep 2022 17:46:47 +0200 Subject: [PATCH 076/232] make tests pass --- tests/rpc/test_rpc_apiserver.py | 1 + tests/strategy/test_strategy_loading.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 5dfa77d8b..898ab4767 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1428,6 +1428,7 @@ def test_api_strategies(botclient): 'StrategyTestV2', 'StrategyTestV3', 'StrategyTestV3Futures', + 'freqai_rl_test_strat', 'freqai_test_classifier', 'freqai_test_multimodel_strat', 'freqai_test_strat' diff --git a/tests/strategy/test_strategy_loading.py b/tests/strategy/test_strategy_loading.py index bf81cd068..c728a81b0 100644 --- a/tests/strategy/test_strategy_loading.py +++ b/tests/strategy/test_strategy_loading.py @@ -34,7 +34,7 @@ def test_search_all_strategies_no_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver.search_all_objects(directory, enum_failed=False) assert isinstance(strategies, list) - assert len(strategies) == 9 + assert len(strategies) == 10 assert isinstance(strategies[0], dict) @@ -42,10 +42,10 @@ def test_search_all_strategies_with_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver.search_all_objects(directory, enum_failed=True) assert isinstance(strategies, list) - assert len(strategies) == 10 + assert len(strategies) == 11 # with enum_failed=True search_all_objects() shall find 2 good strategies # and 1 which fails to load - assert len([x for x in strategies if x['class'] is not None]) == 9 + assert len([x for x in strategies if x['class'] is not None]) == 10 assert len([x for x in strategies if x['class'] is None]) == 1 directory = Path(__file__).parent / "strats_nonexistingdir" From 7b1d409c9814ad7450c9cfe28ec31a732b86f233 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 17 Sep 2022 17:51:06 +0200 Subject: [PATCH 077/232] fix mypy/flake8 --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 6 +++--- freqtrade/freqai/freqai_interface.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a583fc9cd..69ae52f38 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -21,7 +21,7 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions from freqtrade.persistence import Trade -import pytest + logger = logging.getLogger(__name__) @@ -157,10 +157,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): if trade.pair == pair: # FIXME: get_rate and trade_udration shouldn't work with backtesting, # we need to use candle dates and prices to compute that. 
- if self.strategy.dp._exchange is None: + if self.strategy.dp._exchange is None: # type: ignore logger.error('No exchange available.') else: - current_value = self.strategy.dp._exchange.get_rate( + current_value = self.strategy.dp._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 7550f1884..7e952d981 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -92,7 +92,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() - self.strategy: IStrategy = None + self.strategy: Optional[IStrategy] = None def __getstate__(self): """ From eeebb78a5c772b0c3e569fd476587facb1f8a9dc Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 22 Sep 2022 21:16:21 +0200 Subject: [PATCH 078/232] skip darwin in RL tests, remove example scripts, improve doc --- docs/freqai.md | 126 +++++ .../RL/ReinforcementLearnerCustomAgent.py | 456 +++++++++--------- .../ReinforcementLearningExample4ac.py | 143 ------ .../ReinforcementLearningExample5ac.py | 147 ------ tests/freqai/test_freqai_interface.py | 8 + 5 files changed, 362 insertions(+), 518 deletions(-) delete mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py delete mode 100644 freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py diff --git a/docs/freqai.md b/docs/freqai.md index 028a7623e..a2058b0ed 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -805,3 +805,129 @@ Code review, software architecture brainstorming: Beta testing and bug reporting: @bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm, Juha Nykänen @suikula, Wagner Costa @wagnercosta + + +## Reinforcement Learning + +Setting up and running a Reinforcement Learning model is as quick and simple as running a Regressor. Users can start training and trading live from example files using: + +```bash +freqtrade trade --freqaimodel ReinforcementLearner --strategy ReinforcementLearningExample5ac --strategy-path freqtrade/freqai/example_strats --config config_examples/config_freqai-rl.example.json +``` + +As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: + +```python + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. 
+ """ + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + return 0. +``` + +After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: + +```python + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + ... + + if set_generalized_indicators: + # For RL, there are no direct targets to set. This sets the base action to neutral + # until the agent sends an action. + df["&-action"] = 0 + + return df + +``` + +and then the `&-action` will be used in `populate_entry/exit` functions: + +```python + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df +``` + +Users should be careful to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. + +### Using Tensorboard + +Reinforcement Learning models benefit from tracking training metrics. 
FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: + +```bash +pip3 install tensorboard +``` + +Next, the user can activate Tensorboard with the following command: + +```bash +cd freqtrade +tensorboard --logdir user_data/models/unique-id +``` + +where `unique-id` is the `identifier` set in the `freqai` configuration file. + +![tensorboard](assets/tensorboard.png) \ No newline at end of file diff --git a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py index 4ad95c214..31d21d459 100644 --- a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py +++ b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py @@ -1,262 +1,262 @@ -import logging -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Type, Union +# import logging +# from pathlib import Path +# from typing import Any, Dict, List, Optional, Tuple, Type, Union -import gym -import torch as th -from stable_baselines3 import DQN -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3.common.policies import BasePolicy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor -from stable_baselines3.common.type_aliases import GymEnv, Schedule -from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork -from torch import nn +# import gym +# import torch as th +# from stable_baselines3 import DQN +# from stable_baselines3.common.buffers import ReplayBuffer +# from stable_baselines3.common.policies import BasePolicy +# from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor +# from stable_baselines3.common.type_aliases import GymEnv, Schedule +# from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork +# from torch import nn -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +# from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +# from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel -logger = logging.getLogger(__name__) +# logger = logging.getLogger(__name__) -class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): - """ - User can customize agent by defining the class and using it directly. - Here the example is "TDQN" +# class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): +# """ +# User can customize agent by defining the class and using it directly. +# Here the example is "TDQN" - Warning! - This is an advanced example of how a user may create and use a highly - customized model class (which can inherit from existing classes, - similar to how the example below inherits from DQN). - This file is for example purposes only, and should not be run. - """ +# Warning! +# This is an advanced example of how a user may create and use a highly +# customized model class (which can inherit from existing classes, +# similar to how the example below inherits from DQN). +# This file is for example purposes only, and should not be run. 
+# """ - def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): +# def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - train_df = data_dictionary["train_features"] - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) +# train_df = data_dictionary["train_features"] +# total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[256, 256, 128]) +# policy_kwargs = dict(activation_fn=th.nn.ReLU, +# net_arch=[256, 256, 128]) - # TDQN is a custom agent defined below - model = TDQN(self.policy_type, self.train_env, - tensorboard_log=str(Path(dk.data_path / "tensorboard")), - policy_kwargs=policy_kwargs, - **self.freqai_info['model_training_parameters'] - ) +# # TDQN is a custom agent defined below +# model = TDQN(self.policy_type, self.train_env, +# tensorboard_log=str(Path(dk.data_path / "tensorboard")), +# policy_kwargs=policy_kwargs, +# **self.freqai_info['model_training_parameters'] +# ) - model.learn( - total_timesteps=int(total_timesteps), - callback=self.eval_callback - ) +# model.learn( +# total_timesteps=int(total_timesteps), +# callback=self.eval_callback +# ) - if Path(dk.data_path / "best_model.zip").is_file(): - logger.info('Callback found a best model.') - best_model = self.MODELCLASS.load(dk.data_path / "best_model") - return best_model +# if Path(dk.data_path / "best_model.zip").is_file(): +# logger.info('Callback found a best model.') +# best_model = self.MODELCLASS.load(dk.data_path / "best_model") +# return best_model - logger.info('Couldnt find best model, using final model instead.') +# logger.info('Couldnt find best model, using final model instead.') - return model +# return model -# User creates their custom agent and networks as shown below +# # User creates their custom agent and networks as shown below -def create_mlp_( - input_dim: int, - output_dim: int, - net_arch: List[int], - activation_fn: Type[nn.Module] = nn.ReLU, - squash_output: bool = False, -) -> List[nn.Module]: - dropout = 0.2 - if len(net_arch) > 0: - number_of_neural = net_arch[0] +# def create_mlp_( +# input_dim: int, +# output_dim: int, +# net_arch: List[int], +# activation_fn: Type[nn.Module] = nn.ReLU, +# squash_output: bool = False, +# ) -> List[nn.Module]: +# dropout = 0.2 +# if len(net_arch) > 0: +# number_of_neural = net_arch[0] - modules = [ - nn.Linear(input_dim, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, number_of_neural), - nn.BatchNorm1d(number_of_neural), - nn.LeakyReLU(), - nn.Dropout(dropout), - nn.Linear(number_of_neural, output_dim) - ] - return modules +# modules = [ +# nn.Linear(input_dim, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, number_of_neural), +# nn.BatchNorm1d(number_of_neural), +# nn.LeakyReLU(), +# nn.Dropout(dropout), +# nn.Linear(number_of_neural, 
output_dim) +# ] +# return modules -class TDQNetwork(QNetwork): - def __init__(self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True - ): - super().__init__( - observation_space=observation_space, - action_space=action_space, - features_extractor=features_extractor, - features_dim=features_dim, - net_arch=net_arch, - activation_fn=activation_fn, - normalize_images=normalize_images - ) - action_dim = self.action_space.n - q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net).apply(self.init_weights) +# class TDQNetwork(QNetwork): +# def __init__(self, +# observation_space: gym.spaces.Space, +# action_space: gym.spaces.Space, +# features_extractor: nn.Module, +# features_dim: int, +# net_arch: Optional[List[int]] = None, +# activation_fn: Type[nn.Module] = nn.ReLU, +# normalize_images: bool = True +# ): +# super().__init__( +# observation_space=observation_space, +# action_space=action_space, +# features_extractor=features_extractor, +# features_dim=features_dim, +# net_arch=net_arch, +# activation_fn=activation_fn, +# normalize_images=normalize_images +# ) +# action_dim = self.action_space.n +# q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) +# self.q_net = nn.Sequential(*q_net).apply(self.init_weights) - def init_weights(self, m): - if type(m) == nn.Linear: - th.nn.init.kaiming_uniform_(m.weight) +# def init_weights(self, m): +# if type(m) == nn.Linear: +# th.nn.init.kaiming_uniform_(m.weight) -class TDQNPolicy(DQNPolicy): +# class TDQNPolicy(DQNPolicy): - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super().__init__( - observation_space=observation_space, - action_space=action_space, - lr_schedule=lr_schedule, - net_arch=net_arch, - activation_fn=activation_fn, - features_extractor_class=features_extractor_class, - features_extractor_kwargs=features_extractor_kwargs, - normalize_images=normalize_images, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs - ) +# def __init__( +# self, +# observation_space: gym.spaces.Space, +# action_space: gym.spaces.Space, +# lr_schedule: Schedule, +# net_arch: Optional[List[int]] = None, +# activation_fn: Type[nn.Module] = nn.ReLU, +# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, +# features_extractor_kwargs: Optional[Dict[str, Any]] = None, +# normalize_images: bool = True, +# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, +# optimizer_kwargs: Optional[Dict[str, Any]] = None, +# ): +# super().__init__( +# observation_space=observation_space, +# action_space=action_space, +# lr_schedule=lr_schedule, +# net_arch=net_arch, +# activation_fn=activation_fn, +# features_extractor_class=features_extractor_class, +# features_extractor_kwargs=features_extractor_kwargs, +# normalize_images=normalize_images, +# optimizer_class=optimizer_class, +# 
optimizer_kwargs=optimizer_kwargs +# ) - @staticmethod - def init_weights(module: nn.Module, gain: float = 1) -> None: - """ - Orthogonal initialization (used in PPO and A2C) - """ - if isinstance(module, (nn.Linear, nn.Conv2d)): - nn.init.kaiming_uniform_(module.weight) - if module.bias is not None: - module.bias.data.fill_(0.0) +# @staticmethod +# def init_weights(module: nn.Module, gain: float = 1) -> None: +# """ +# Orthogonal initialization (used in PPO and A2C) +# """ +# if isinstance(module, (nn.Linear, nn.Conv2d)): +# nn.init.kaiming_uniform_(module.weight) +# if module.bias is not None: +# module.bias.data.fill_(0.0) - def make_q_net(self) -> TDQNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return TDQNetwork(**net_args).to(self.device) +# def make_q_net(self) -> TDQNetwork: +# # Make sure we always have separate networks for features extractors etc +# net_args = self._update_features_extractor(self.net_args, features_extractor=None) +# return TDQNetwork(**net_args).to(self.device) -class TMultiInputPolicy(TDQNPolicy): - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super().__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) +# class TMultiInputPolicy(TDQNPolicy): +# def __init__( +# self, +# observation_space: gym.spaces.Space, +# action_space: gym.spaces.Space, +# lr_schedule: Schedule, +# net_arch: Optional[List[int]] = None, +# activation_fn: Type[nn.Module] = nn.ReLU, +# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, +# features_extractor_kwargs: Optional[Dict[str, Any]] = None, +# normalize_images: bool = True, +# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, +# optimizer_kwargs: Optional[Dict[str, Any]] = None, +# ): +# super().__init__( +# observation_space, +# action_space, +# lr_schedule, +# net_arch, +# activation_fn, +# features_extractor_class, +# features_extractor_kwargs, +# normalize_images, +# optimizer_class, +# optimizer_kwargs, +# ) -class TDQN(DQN): +# class TDQN(DQN): - policy_aliases: Dict[str, Type[BasePolicy]] = { - "MlpPolicy": MlpPolicy, - "CnnPolicy": CnnPolicy, - "TMultiInputPolicy": TMultiInputPolicy, - } +# policy_aliases: Dict[str, Type[BasePolicy]] = { +# "MlpPolicy": MlpPolicy, +# "CnnPolicy": CnnPolicy, +# "TMultiInputPolicy": TMultiInputPolicy, +# } - def __init__( - self, - policy: Union[str, Type[TDQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, # 1e6 - learning_starts: int = 50000, - batch_size: int = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - replay_buffer_class: Optional[ReplayBuffer] = None, - replay_buffer_kwargs: Optional[Dict[str, Any]] = None, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - 
exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 1, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): +# def __init__( +# self, +# policy: Union[str, Type[TDQNPolicy]], +# env: Union[GymEnv, str], +# learning_rate: Union[float, Schedule] = 1e-4, +# buffer_size: int = 1000000, # 1e6 +# learning_starts: int = 50000, +# batch_size: int = 32, +# tau: float = 1.0, +# gamma: float = 0.99, +# train_freq: Union[int, Tuple[int, str]] = 4, +# gradient_steps: int = 1, +# replay_buffer_class: Optional[ReplayBuffer] = None, +# replay_buffer_kwargs: Optional[Dict[str, Any]] = None, +# optimize_memory_usage: bool = False, +# target_update_interval: int = 10000, +# exploration_fraction: float = 0.1, +# exploration_initial_eps: float = 1.0, +# exploration_final_eps: float = 0.05, +# max_grad_norm: float = 10, +# tensorboard_log: Optional[str] = None, +# create_eval_env: bool = False, +# policy_kwargs: Optional[Dict[str, Any]] = None, +# verbose: int = 1, +# seed: Optional[int] = None, +# device: Union[th.device, str] = "auto", +# _init_setup_model: bool = True, +# ): - super().__init__( - policy=policy, - env=env, - learning_rate=learning_rate, - buffer_size=buffer_size, - learning_starts=learning_starts, - batch_size=batch_size, - tau=tau, - gamma=gamma, - train_freq=train_freq, - gradient_steps=gradient_steps, - replay_buffer_class=replay_buffer_class, # No action noise - replay_buffer_kwargs=replay_buffer_kwargs, - optimize_memory_usage=optimize_memory_usage, - target_update_interval=target_update_interval, - exploration_fraction=exploration_fraction, - exploration_initial_eps=exploration_initial_eps, - exploration_final_eps=exploration_final_eps, - max_grad_norm=max_grad_norm, - tensorboard_log=tensorboard_log, - create_eval_env=create_eval_env, - policy_kwargs=policy_kwargs, - verbose=verbose, - seed=seed, - device=device, - _init_setup_model=_init_setup_model - ) +# super().__init__( +# policy=policy, +# env=env, +# learning_rate=learning_rate, +# buffer_size=buffer_size, +# learning_starts=learning_starts, +# batch_size=batch_size, +# tau=tau, +# gamma=gamma, +# train_freq=train_freq, +# gradient_steps=gradient_steps, +# replay_buffer_class=replay_buffer_class, # No action noise +# replay_buffer_kwargs=replay_buffer_kwargs, +# optimize_memory_usage=optimize_memory_usage, +# target_update_interval=target_update_interval, +# exploration_fraction=exploration_fraction, +# exploration_initial_eps=exploration_initial_eps, +# exploration_final_eps=exploration_final_eps, +# max_grad_norm=max_grad_norm, +# tensorboard_log=tensorboard_log, +# create_eval_env=create_eval_env, +# policy_kwargs=policy_kwargs, +# verbose=verbose, +# seed=seed, +# device=device, +# _init_setup_model=_init_setup_model +# ) diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py deleted file mode 100644 index d9932eea7..000000000 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample4ac.py +++ /dev/null @@ -1,143 +0,0 @@ -import logging -from functools import reduce - -import pandas as pd -import talib.abstract as ta -from pandas import DataFrame - -from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, 
merge_informative_pair - - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningExample4ac(IStrategy): - """ - Test strategy - used for testing freqAI functionalities. - DO not use in production. - """ - - minimal_roi = {"0": 0.1, "240": -1} - - plot_config = { - "main_plot": {}, - "subplots": { - "prediction": {"prediction": {"color": "blue"}}, - "target_roi": { - "target_roi": {"color": "brown"}, - }, - "do_predict": { - "do_predict": {"color": "brown"}, - }, - }, - } - - process_only_new_candles = True - stoploss = -0.05 - use_exit_signal = True - startup_candle_count: int = 300 - can_short = True - - linear_roi_offset = DecimalParameter( - 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True - ) - max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) - - def informative_pairs(self): - whitelist_pairs = self.dp.current_whitelist() - corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] - informative_pairs = [] - for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: - for pair in whitelist_pairs: - informative_pairs.append((pair, tf)) - for pair in corr_pairs: - if pair in whitelist_pairs: - continue # avoid duplication - informative_pairs.append((pair, tf)) - return informative_pairs - - def populate_any_indicators( - self, pair, df, tf, informative=None, set_generalized_indicators=False - ): - - coin = pair.split('/')[0] - - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - - informative[f"%-{coin}pct-change"] = informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] - - # The following features are necessary for RL models - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] - - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) - - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) - - # Add generalized indicators here (because in live, it will call this - # function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - - # For RL, this is not a target, it is simply a filler until actions come out - # of the model. 
- # for Base4ActionEnv, 0 is netural (hold) - df["&-action"] = 0 - - return df - - def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - - dataframe = self.freqai.start(dataframe, metadata, self) - - return dataframe - - def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - - enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] - - if enter_long_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] - ] = (1, "long") - - enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] - - if enter_short_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] - ] = (1, "short") - - return df - - def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] - if exit_long_conditions: - df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit"] = 1 - - return df diff --git a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py b/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py deleted file mode 100644 index 2118e1221..000000000 --- a/freqtrade/freqai/example_strats/ReinforcementLearningExample5ac.py +++ /dev/null @@ -1,147 +0,0 @@ -import logging -from functools import reduce - -import pandas as pd -import talib.abstract as ta -from pandas import DataFrame - -from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair - - -logger = logging.getLogger(__name__) - - -class ReinforcementLearningExample5ac(IStrategy): - """ - Test strategy - used for testing freqAI functionalities. - DO not use in production. - """ - - minimal_roi = {"0": 0.1, "240": -1} - - plot_config = { - "main_plot": {}, - "subplots": { - "prediction": {"prediction": {"color": "blue"}}, - "target_roi": { - "target_roi": {"color": "brown"}, - }, - "do_predict": { - "do_predict": {"color": "brown"}, - }, - }, - } - - process_only_new_candles = True - stoploss = -0.05 - use_exit_signal = True - startup_candle_count: int = 300 - can_short = True - - linear_roi_offset = DecimalParameter( - 0.00, 0.02, default=0.005, space="sell", optimize=False, load=True - ) - max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True) - - def informative_pairs(self): - whitelist_pairs = self.dp.current_whitelist() - corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] - informative_pairs = [] - for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: - for pair in whitelist_pairs: - informative_pairs.append((pair, tf)) - for pair in corr_pairs: - if pair in whitelist_pairs: - continue # avoid duplication - informative_pairs.append((pair, tf)) - return informative_pairs - - def populate_any_indicators( - self, pair, df, tf, informative=None, set_generalized_indicators=False - ): - - coin = pair.split('/')[0] - - if informative is None: - informative = self.dp.get_pair_dataframe(pair, tf) - - # first loop is automatically duplicating indicators for time periods - for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: - - t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) - - informative[f"%-{coin}pct-change"] = 
informative["close"].pct_change() - informative[f"%-{coin}raw_volume"] = informative["volume"] - - # FIXME: add these outside the user strategy? - # The following columns are necessary for RL models. - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] - - indicators = [col for col in informative if col.startswith("%")] - # This loop duplicates and shifts all indicators to add a sense of recency to data - for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): - if n == 0: - continue - informative_shift = informative[indicators].shift(n) - informative_shift = informative_shift.add_suffix("_shift-" + str(n)) - informative = pd.concat((informative, informative_shift), axis=1) - - df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) - skip_columns = [ - (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] - ] - df = df.drop(columns=skip_columns) - - # Add generalized indicators here (because in live, it will call this - # function to populate indicators during training). Notice how we ensure not to - # add them multiple times - if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - - # For RL, there are no direct targets to set. This is filler (neutral) - # until the agent sends an action. - df["&-action"] = 0 - - return df - - def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - - dataframe = self.freqai.start(dataframe, metadata, self) - - return dataframe - - def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - - enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] - - if enter_long_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] - ] = (1, "long") - - enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] - - if enter_short_conditions: - df.loc[ - reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] - ] = (1, "short") - - return df - - def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] - if exit_long_conditions: - df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 - - exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] - if exit_short_conditions: - df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 - - return df diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 52577f2d3..a50e7e04c 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -18,6 +18,11 @@ def is_arm() -> bool: return "arm" in machine or "aarch64" in machine +def is_mac() -> bool: + machine = platform.system() + return "Darwin" in machine + + @pytest.mark.parametrize('model', [ 'LightGBMRegressor', 'XGBoostRegressor', @@ -29,6 +34,9 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") + if is_mac(): + pytest.skip("Reinforcement learning module not available on intel based Mac OS") + model_save_ext = 'joblib' freqai_conf.update({"freqaimodel": model}) 
freqai_conf.update({"timerange": "20180110-20180130"}) From f6e9753c990d4a697e88d37e43ed8d963c301767 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 22 Sep 2022 21:18:09 +0200 Subject: [PATCH 079/232] show advanced users how they can customize agent indepth` --- docs/freqai.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/freqai.md b/docs/freqai.md index a2058b0ed..938fb70f4 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -913,6 +913,10 @@ and then the `&-action` will be used in `populate_entry/exit` functions: Users should be careful to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. +### Creating a custom agent + +Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py` + ### Using Tensorboard Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: From 7295ba0fb2c408c3ce5ec413edae75b93805d829 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 22 Sep 2022 23:42:33 +0200 Subject: [PATCH 080/232] add test for Base4ActionEnv --- .../RL/BaseReinforcementLearningModel.py | 7 - .../RL/ReinforcementLearnerCustomAgent.py | 262 ------------------ tests/freqai/test_freqai_interface.py | 9 +- 3 files changed, 5 insertions(+), 273 deletions(-) delete mode 100644 freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 69ae52f38..d10bf4dc3 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -319,13 +319,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return 0. - # TODO take care of this appendage. Right now it needs to be called because FreqAI enforces it. - # But FreqaiRL needs more objects passed to fit() (like DK) and we dont want to go refactor - # all the other existing fit() functions to include dk argument. For now we instantiate and - # leave it. 
- # def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any: - # return - def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, diff --git a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py b/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py deleted file mode 100644 index 31d21d459..000000000 --- a/freqtrade/freqai/RL/ReinforcementLearnerCustomAgent.py +++ /dev/null @@ -1,262 +0,0 @@ -# import logging -# from pathlib import Path -# from typing import Any, Dict, List, Optional, Tuple, Type, Union - -# import gym -# import torch as th -# from stable_baselines3 import DQN -# from stable_baselines3.common.buffers import ReplayBuffer -# from stable_baselines3.common.policies import BasePolicy -# from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor -# from stable_baselines3.common.type_aliases import GymEnv, Schedule -# from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, QNetwork -# from torch import nn - -# from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -# from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel - - -# logger = logging.getLogger(__name__) - - -# class ReinforcementLearnerCustomAgent(BaseReinforcementLearningModel): -# """ -# User can customize agent by defining the class and using it directly. -# Here the example is "TDQN" - -# Warning! -# This is an advanced example of how a user may create and use a highly -# customized model class (which can inherit from existing classes, -# similar to how the example below inherits from DQN). -# This file is for example purposes only, and should not be run. -# """ - -# def fit_rl(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen): - -# train_df = data_dictionary["train_features"] -# total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - -# policy_kwargs = dict(activation_fn=th.nn.ReLU, -# net_arch=[256, 256, 128]) - -# # TDQN is a custom agent defined below -# model = TDQN(self.policy_type, self.train_env, -# tensorboard_log=str(Path(dk.data_path / "tensorboard")), -# policy_kwargs=policy_kwargs, -# **self.freqai_info['model_training_parameters'] -# ) - -# model.learn( -# total_timesteps=int(total_timesteps), -# callback=self.eval_callback -# ) - -# if Path(dk.data_path / "best_model.zip").is_file(): -# logger.info('Callback found a best model.') -# best_model = self.MODELCLASS.load(dk.data_path / "best_model") -# return best_model - -# logger.info('Couldnt find best model, using final model instead.') - -# return model - -# # User creates their custom agent and networks as shown below - - -# def create_mlp_( -# input_dim: int, -# output_dim: int, -# net_arch: List[int], -# activation_fn: Type[nn.Module] = nn.ReLU, -# squash_output: bool = False, -# ) -> List[nn.Module]: -# dropout = 0.2 -# if len(net_arch) > 0: -# number_of_neural = net_arch[0] - -# modules = [ -# nn.Linear(input_dim, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, number_of_neural), -# nn.BatchNorm1d(number_of_neural), -# nn.LeakyReLU(), -# nn.Dropout(dropout), -# nn.Linear(number_of_neural, output_dim) -# ] 
-# return modules - - -# class TDQNetwork(QNetwork): -# def __init__(self, -# observation_space: gym.spaces.Space, -# action_space: gym.spaces.Space, -# features_extractor: nn.Module, -# features_dim: int, -# net_arch: Optional[List[int]] = None, -# activation_fn: Type[nn.Module] = nn.ReLU, -# normalize_images: bool = True -# ): -# super().__init__( -# observation_space=observation_space, -# action_space=action_space, -# features_extractor=features_extractor, -# features_dim=features_dim, -# net_arch=net_arch, -# activation_fn=activation_fn, -# normalize_images=normalize_images -# ) -# action_dim = self.action_space.n -# q_net = create_mlp_(self.features_dim, action_dim, self.net_arch, self.activation_fn) -# self.q_net = nn.Sequential(*q_net).apply(self.init_weights) - -# def init_weights(self, m): -# if type(m) == nn.Linear: -# th.nn.init.kaiming_uniform_(m.weight) - - -# class TDQNPolicy(DQNPolicy): - -# def __init__( -# self, -# observation_space: gym.spaces.Space, -# action_space: gym.spaces.Space, -# lr_schedule: Schedule, -# net_arch: Optional[List[int]] = None, -# activation_fn: Type[nn.Module] = nn.ReLU, -# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, -# features_extractor_kwargs: Optional[Dict[str, Any]] = None, -# normalize_images: bool = True, -# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, -# optimizer_kwargs: Optional[Dict[str, Any]] = None, -# ): -# super().__init__( -# observation_space=observation_space, -# action_space=action_space, -# lr_schedule=lr_schedule, -# net_arch=net_arch, -# activation_fn=activation_fn, -# features_extractor_class=features_extractor_class, -# features_extractor_kwargs=features_extractor_kwargs, -# normalize_images=normalize_images, -# optimizer_class=optimizer_class, -# optimizer_kwargs=optimizer_kwargs -# ) - -# @staticmethod -# def init_weights(module: nn.Module, gain: float = 1) -> None: -# """ -# Orthogonal initialization (used in PPO and A2C) -# """ -# if isinstance(module, (nn.Linear, nn.Conv2d)): -# nn.init.kaiming_uniform_(module.weight) -# if module.bias is not None: -# module.bias.data.fill_(0.0) - -# def make_q_net(self) -> TDQNetwork: -# # Make sure we always have separate networks for features extractors etc -# net_args = self._update_features_extractor(self.net_args, features_extractor=None) -# return TDQNetwork(**net_args).to(self.device) - - -# class TMultiInputPolicy(TDQNPolicy): -# def __init__( -# self, -# observation_space: gym.spaces.Space, -# action_space: gym.spaces.Space, -# lr_schedule: Schedule, -# net_arch: Optional[List[int]] = None, -# activation_fn: Type[nn.Module] = nn.ReLU, -# features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, -# features_extractor_kwargs: Optional[Dict[str, Any]] = None, -# normalize_images: bool = True, -# optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, -# optimizer_kwargs: Optional[Dict[str, Any]] = None, -# ): -# super().__init__( -# observation_space, -# action_space, -# lr_schedule, -# net_arch, -# activation_fn, -# features_extractor_class, -# features_extractor_kwargs, -# normalize_images, -# optimizer_class, -# optimizer_kwargs, -# ) - - -# class TDQN(DQN): - -# policy_aliases: Dict[str, Type[BasePolicy]] = { -# "MlpPolicy": MlpPolicy, -# "CnnPolicy": CnnPolicy, -# "TMultiInputPolicy": TMultiInputPolicy, -# } - -# def __init__( -# self, -# policy: Union[str, Type[TDQNPolicy]], -# env: Union[GymEnv, str], -# learning_rate: Union[float, Schedule] = 1e-4, -# buffer_size: int = 1000000, # 1e6 -# learning_starts: int = 
50000, -# batch_size: int = 32, -# tau: float = 1.0, -# gamma: float = 0.99, -# train_freq: Union[int, Tuple[int, str]] = 4, -# gradient_steps: int = 1, -# replay_buffer_class: Optional[ReplayBuffer] = None, -# replay_buffer_kwargs: Optional[Dict[str, Any]] = None, -# optimize_memory_usage: bool = False, -# target_update_interval: int = 10000, -# exploration_fraction: float = 0.1, -# exploration_initial_eps: float = 1.0, -# exploration_final_eps: float = 0.05, -# max_grad_norm: float = 10, -# tensorboard_log: Optional[str] = None, -# create_eval_env: bool = False, -# policy_kwargs: Optional[Dict[str, Any]] = None, -# verbose: int = 1, -# seed: Optional[int] = None, -# device: Union[th.device, str] = "auto", -# _init_setup_model: bool = True, -# ): - -# super().__init__( -# policy=policy, -# env=env, -# learning_rate=learning_rate, -# buffer_size=buffer_size, -# learning_starts=learning_starts, -# batch_size=batch_size, -# tau=tau, -# gamma=gamma, -# train_freq=train_freq, -# gradient_steps=gradient_steps, -# replay_buffer_class=replay_buffer_class, # No action noise -# replay_buffer_kwargs=replay_buffer_kwargs, -# optimize_memory_usage=optimize_memory_usage, -# target_update_interval=target_update_interval, -# exploration_fraction=exploration_fraction, -# exploration_initial_eps=exploration_initial_eps, -# exploration_final_eps=exploration_final_eps, -# max_grad_norm=max_grad_norm, -# tensorboard_log=tensorboard_log, -# create_eval_env=create_eval_env, -# policy_kwargs=policy_kwargs, -# verbose=verbose, -# seed=seed, -# device=device, -# _init_setup_model=_init_setup_model -# ) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index a50e7e04c..252b8fc37 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -28,7 +28,8 @@ def is_mac() -> bool: 'XGBoostRegressor', 'CatboostRegressor', 'ReinforcementLearner', - 'ReinforcementLearner_multiproc' + 'ReinforcementLearner_multiproc', + 'ReinforcementLearner_test_4ac' ]) def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if is_arm() and model == 'CatboostRegressor': @@ -64,6 +65,9 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): "win_reward_factor": 2 }} + if 'test_4ac' in model: + freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -86,9 +90,6 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): f"{freqai.dk.model_filename}_model.{model_save_ext}").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() - # if 'ReinforcementLearner' not in model: - # assert Path(freqai.dk.data_path / - # f"{freqai.dk.model_filename}_svm_model.joblib").is_file() shutil.rmtree(Path(freqai.dk.full_path)) From 1c56fa034f908ae005e0167830e18ef54667f1a4 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 09:19:16 +0200 Subject: [PATCH 081/232] add test_models folder --- .../ReinforcementLearner_test_4ac.py | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 tests/freqai/test_models/ReinforcementLearner_test_4ac.py diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py 
b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py new file mode 100644 index 000000000..9a8f800bd --- /dev/null +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -0,0 +1,104 @@ +import logging +from pathlib import Path +from typing import Any, Dict + +import numpy as np +import torch as th + +from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions +from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel + + +logger = logging.getLogger(__name__) + + +class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel): + """ + User created Reinforcement Learning Model prediction model. + """ + + def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): + + train_df = data_dictionary["train_features"] + total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) + + policy_kwargs = dict(activation_fn=th.nn.ReLU, + net_arch=[128, 128]) + + if dk.pair not in self.dd.model_dictionary or not self.continual_learning: + model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, + tensorboard_log=Path( + dk.full_path / "tensorboard" / dk.pair.split('/')[0]), + **self.freqai_info['model_training_parameters'] + ) + else: + logger.info('Continual training activated - starting training from previously ' + 'trained agent.') + model = self.dd.model_dictionary[dk.pair] + model.set_env(self.train_env) + + model.learn( + total_timesteps=int(total_timesteps), + callback=self.eval_callback + ) + + if Path(dk.data_path / "best_model.zip").is_file(): + logger.info('Callback found a best model.') + best_model = self.MODELCLASS.load(dk.data_path / "best_model") + return best_model + + logger.info('Couldnt find best model, using final model instead.') + + return model + + class MyRLEnv(Base4ActionRLEnv): + """ + User can override any function in BaseRLEnv and gym.Env. Here the user + sets a custom reward based on profit and trade duration. + """ + + def calculate_reward(self, action): + + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + + # reward agent for entering trades + if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral): + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + + # discourage sitting in position + if (self._position in (Positions.Short, Positions.Long) and + action == Actions.Neutral.value): + return -1 * trade_duration / max_trade_duration + + # close long + if action == Actions.Exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + # close short + if action == Actions.Exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + + return 0. 
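Note: the ReinforcementLearner_test_4ac file added above is the complete pattern for a user-supplied RL model. As a compressed, illustrative sketch of the same idea (not one of the files in this patch set — the class name MyCustomRewardLearner and the reward values are placeholders, while the imports, base classes and helpers such as _is_valid() and get_unrealized_profit() come from the modules touched in these commits), a user could keep the shipped fit() by inheriting ReinforcementLearner and only override the nested environment:

    from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
    from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions


    class MyCustomRewardLearner(ReinforcementLearner):
        """
        Hypothetical user model: reuses fit() from ReinforcementLearner and only
        swaps in a 4-action environment with a minimal reward definition.
        """

        class MyRLEnv(Base4ActionRLEnv):
            def calculate_reward(self, action: int) -> float:
                # penalize actions that are invalid for the current position
                if not self._is_valid(action):
                    return -2.
                # pay out the open profit/loss when the agent exits a position
                if (action == Actions.Exit.value
                        and self._position in (Positions.Long, Positions.Short)):
                    return float(self.get_unrealized_profit())
                # all other transitions are treated as neutral
                return 0.

Such a file would be discovered the same way the test above wires it up: point the top-level freqaimodel_path config key at the directory containing it and set freqaimodel to the class name.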
From f5cd8f62c6d406372f13bd1626dc586e11919953 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 23 Sep 2022 10:24:39 +0200 Subject: [PATCH 082/232] Remove unused code from BaseEnv --- freqtrade/freqai/RL/BaseEnvironment.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index bb43f5300..200b7d138 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -195,8 +195,6 @@ class BaseEnvironment(gym.Env): be inherited/edited by the user made ReinforcementLearner file. """ - return 0. - def _update_unrealized_total_profit(self): """ Update the unrealized total profit incase of episode end. @@ -250,21 +248,8 @@ class BaseEnvironment(gym.Env): return 0 - def get_portfolio_log_returns(self): - return self.portfolio_log_returns[1:self._current_tick + 1] - def update_portfolio_log_returns(self, action): self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) def current_price(self) -> float: return self.prices.iloc[self._current_tick].open - - def prev_price(self) -> float: - return self.prices.iloc[self._current_tick - 1].open - - def sharpe_ratio(self): - if len(self.close_trade_profit) == 0: - return 0. - returns = np.array(self.close_trade_profit) - reward = (np.mean(returns) - 0. + 1e-9) / (np.std(returns) + 1e-9) - return reward From f7dd3045f7287f3480f4452b4d969ac734f43080 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 23 Sep 2022 10:30:52 +0200 Subject: [PATCH 083/232] Parameterize backtesting test --- tests/freqai/test_freqai_interface.py | 47 ++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 252b8fc37..592499a34 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -173,9 +173,54 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): shutil.rmtree(Path(freqai.dk.full_path)) +@pytest.mark.parametrize('model', [ + 'LightGBMRegressor', + 'XGBoostRegressor', + 'CatboostRegressor', + 'ReinforcementLearner', + 'ReinforcementLearner_multiproc', + 'ReinforcementLearner_test_4ac' + ]) def test_start_backtesting(mocker, freqai_conf): - freqai_conf.update({"timerange": "20180120-20180130"}) freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) + + if is_arm() and model == 'CatboostRegressor': + pytest.skip("CatBoost is not supported on ARM") + + if is_mac(): + pytest.skip("Reinforcement learning module not available on intel based Mac OS") + + model_save_ext = 'joblib' + freqai_conf.update({"freqaimodel": model}) + freqai_conf.update({"timerange": "20180110-20180130"}) + freqai_conf.update({"strategy": "freqai_test_strat"}) + + if 'ReinforcementLearner' in model: + model_save_ext = 'zip' + freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf["freqai"].update({"model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "verbose": 1 + }}) + freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) + freqai_conf["freqai"]["rl_config"] = { + "train_cycles": 1, + "thread_count": 2, + "max_trade_duration_candles": 300, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "max_training_drawdown_pct": 0.5, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02, + "win_reward_factor": 2 + }} + + if 'test_4ac' in model: + freqai_conf["freqaimodel_path"] = 
str(Path(__file__).parents[1] / "freqai" / "test_models") + + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) From 95121550efd6417583f6bfa2ab251c564e22d5d8 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 23 Sep 2022 10:37:34 +0200 Subject: [PATCH 084/232] Remove unnecessary models, add model arg --- tests/freqai/test_freqai_interface.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 592499a34..ac8fd2b42 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -177,11 +177,9 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): 'LightGBMRegressor', 'XGBoostRegressor', 'CatboostRegressor', - 'ReinforcementLearner', - 'ReinforcementLearner_multiproc', - 'ReinforcementLearner_test_4ac' + 'ReinforcementLearner' ]) -def test_start_backtesting(mocker, freqai_conf): +def test_start_backtesting(mocker, freqai_conf, model): freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) if is_arm() and model == 'CatboostRegressor': From 9c361f442262007ef77e2f899224eca1874ad298 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 18:04:43 +0200 Subject: [PATCH 085/232] increase test coverage for RL and FreqAI --- config_examples/config_freqai-rl.example.json | 109 ------------------ freqtrade/freqai/data_drawer.py | 19 --- tests/freqai/test_freqai_interface.py | 79 ++++++++++--- 3 files changed, 61 insertions(+), 146 deletions(-) delete mode 100644 config_examples/config_freqai-rl.example.json diff --git a/config_examples/config_freqai-rl.example.json b/config_examples/config_freqai-rl.example.json deleted file mode 100644 index 9dfea932d..000000000 --- a/config_examples/config_freqai-rl.example.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "trading_mode": "futures", - "new_pairs_days": 30, - "margin_mode": "isolated", - "max_open_trades": 8, - "stake_currency": "USDT", - "stake_amount": 1000, - "tradable_balance_ratio": 1, - "fiat_display_currency": "USD", - "dry_run": true, - "timeframe": "5m", - "dataformat_ohlcv": "json", - "dry_run_wallet": 12000, - "cancel_open_orders_on_exit": true, - "unfilledtimeout": { - "entry": 10, - "exit": 30 - }, - "exchange": { - "name": "binance", - "key": "", - "secret": "", - "ccxt_config": { - "enableRateLimit": true - }, - "ccxt_async_config": { - "enableRateLimit": true, - "rateLimit": 200 - }, - "pair_whitelist": [ - "1INCH/USDT", - "AAVE/USDT" - ], - "pair_blacklist": [] - }, - "entry_pricing": { - "price_side": "same", - "use_order_book": true, - "order_book_top": 1, - "price_last_balance": 0.0, - "check_depth_of_market": { - "enabled": false, - "bids_to_ask_delta": 1 - } - }, - "exit_pricing": { - "price_side": "other", - "use_order_book": true, - "order_book_top": 1 - }, - "pairlists": [ - { - "method": "StaticPairList" - } - ], - "freqai": { - "enabled": true, - "model_save_type": "stable_baselines", - "conv_width": 4, - "purge_old_models": true, - "limit_ram_usage": false, - "train_period_days": 5, - "backtest_period_days": 2, - "identifier": "unique-id", - "continual_learning": false, - "data_kitchen_thread_count": 2, - "feature_parameters": { - "include_corr_pairlist": [ - "BTC/USDT", - "ETH/USDT" - ], - "include_timeframes": [ - "5m", - "30m" - ], - "indicator_max_period_candles": 20, - "indicator_periods_candles": [14] - }, - 
"data_split_parameters": { - "test_size": 0.5, - "random_state": 1, - "shuffle": false - }, - "model_training_parameters": { - "learning_rate": 0.00025, - "gamma": 0.9, - "verbose": 1 - }, - "rl_config": { - "train_cycles": 6, - "thread_count": 4, - "max_trade_duration_candles": 300, - "model_type": "PPO", - "policy_type": "MlpPolicy", - "max_training_drawdown_pct": 0.5, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.02, - "win_reward_factor": 2 - } - } - }, - "bot_name": "RL_test", - "force_entry_enable": true, - "initial_state": "running", - "internals": { - "process_throttle_secs": 5 - } -} diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index b58bed9ba..03840317f 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -602,22 +602,3 @@ class FreqaiDataDrawer: ) return corr_dataframes, base_dataframes - - # to be used if we want to send predictions directly to the follower instead of forcing - # follower to load models and inference - # def save_model_return_values_to_disk(self) -> None: - # with open(self.full_path / str('model_return_values.json'), "w") as fp: - # json.dump(self.model_return_values, fp, default=self.np_encoder) - - # def load_model_return_values_from_disk(self, dk: FreqaiDataKitchen) -> FreqaiDataKitchen: - # exists = Path(self.full_path / str('model_return_values.json')).resolve().exists() - # if exists: - # with open(self.full_path / str('model_return_values.json'), "r") as fp: - # self.model_return_values = json.load(fp) - # elif not self.follow_mode: - # logger.info("Could not find existing datadrawer, starting from scratch") - # else: - # logger.warning(f'Follower could not find pair_dictionary at {self.full_path} ' - # 'sending null values back to strategy') - - # return exists, dk diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index ac8fd2b42..f0af90f18 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -4,13 +4,15 @@ from pathlib import Path from unittest.mock import MagicMock import pytest - +from freqtrade.enums import RunMode from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import get_patched_exchange, log_has_re from tests.freqai.conftest import get_patched_freqai_strategy +from freqtrade.persistence import Trade +from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange def is_arm() -> bool: @@ -173,29 +175,34 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): shutil.rmtree(Path(freqai.dk.full_path)) -@pytest.mark.parametrize('model', [ - 'LightGBMRegressor', - 'XGBoostRegressor', - 'CatboostRegressor', - 'ReinforcementLearner' - ]) -def test_start_backtesting(mocker, freqai_conf, model): +@pytest.mark.parametrize( + "model, num_files, strat", + [ + ("LightGBMRegressor", 6, "freqai_test_strat"), + ("XGBoostRegressor", 6, "freqai_test_strat"), + ("CatboostRegressor", 6, "freqai_test_strat"), + ("ReinforcementLearner", 7, "freqai_rl_test_strat"), + ("XGBoostClassifier", 6, "freqai_test_classifier"), + ("LightGBMClassifier", 6, "freqai_test_classifier"), + ("CatboostClassifier", 6, "freqai_test_classifier") + ], + ) +def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): freqai_conf.get("freqai", 
{}).update({"save_backtest_models": True}) - - if is_arm() and model == 'CatboostRegressor': + freqai_conf['runmode'] = RunMode.BACKTEST + Trade.use_db = False + if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") if is_mac(): pytest.skip("Reinforcement learning module not available on intel based Mac OS") - model_save_ext = 'joblib' freqai_conf.update({"freqaimodel": model}) - freqai_conf.update({"timerange": "20180110-20180130"}) - freqai_conf.update({"strategy": "freqai_test_strat"}) + freqai_conf.update({"timerange": "20180120-20180130"}) + freqai_conf.update({"strategy": strat}) if 'ReinforcementLearner' in model: - model_save_ext = 'zip' - freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf["freqai"].update({"model_training_parameters": { "learning_rate": 0.00025, "gamma": 0.9, @@ -217,8 +224,7 @@ def test_start_backtesting(mocker, freqai_conf, model): if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") - - + strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -237,7 +243,7 @@ def test_start_backtesting(mocker, freqai_conf, model): freqai.start_backtesting(df, metadata, freqai.dk) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] - assert len(model_folders) == 6 + assert len(model_folders) == num_files shutil.rmtree(Path(freqai.dk.full_path)) @@ -455,3 +461,40 @@ def test_freqai_informative_pairs(mocker, freqai_conf, timeframes, corr_pairs): pairs_b = strategy.gather_informative_pairs() # we expect unique pairs * timeframes assert len(pairs_b) == len(set(pairlist + corr_pairs)) * len(timeframes) + + +def test_start_set_train_queue(mocker, freqai_conf, caplog): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + pairlist = PairListManager(exchange, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange, pairlist) + strategy.freqai_info = freqai_conf.get("freqai", {}) + freqai = strategy.freqai + freqai.live = False + + freqai.train_queue = freqai._set_train_queue() + + assert log_has_re( + "Set fresh train queue from whitelist.", + caplog, + ) + + +def test_get_required_data_timerange(mocker, freqai_conf): + time_range = get_required_data_timerange(freqai_conf) + assert (time_range.stopts - time_range.startts) == 177300 + + +def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + pairlist = PairListManager(exchange, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange, pairlist) + freqai_conf['pairs'] = freqai_conf['exchange']['pair_whitelist'] + freqai_conf['datadir'] = Path(tmpdir) + download_all_data_for_training(strategy.dp, freqai_conf) + + assert log_has_re( + "Downloading", + caplog, + ) From 77c360b264c9dee489081c2761cc3be4ba0b01d1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 19:17:27 +0200 Subject: [PATCH 086/232] improve typing, improve docstrings, ensure global tests pass --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 13 +++- freqtrade/freqai/RL/Base5ActionRLEnv.py | 11 +++ freqtrade/freqai/RL/BaseEnvironment.py | 22 ++++-- .../RL/BaseReinforcementLearningModel.py | 75 +++++++++++++------ .../prediction_models/ReinforcementLearner.py | 20 ++++- .../ReinforcementLearner_multiproc.py | 
19 +++-- tests/freqai/test_freqai_interface.py | 4 +- 7 files changed, 124 insertions(+), 40 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index bd5785b85..b4fe78b71 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -25,6 +25,17 @@ class Base4ActionRLEnv(BaseEnvironment): self.action_space = spaces.Discrete(len(Actions)) def step(self, action: int): + """ + Logic for a single step (incrementing one candle in time) + by the agent + :param: action: int = the action type that the agent plans + to take for the current step. + :returns: + observation = current state of environment + step_reward = the reward from `calculate_reward()` + _done = if the agent "died" or if the candles finished + info = dict passed back to openai gym lib + """ self._done = False self._current_tick += 1 @@ -92,7 +103,6 @@ class Base4ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info def is_tradesignal(self, action: int): - # trade signal """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short @@ -107,7 +117,6 @@ class Base4ActionRLEnv(BaseEnvironment): (action == Actions.Long_enter.value and self._position == Positions.Short)) def _is_valid(self, action: int): - # trade signal """ Determine if the signal is valid. e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index e0a38f9d1..80543bf72 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -60,6 +60,17 @@ class Base5ActionRLEnv(BaseEnvironment): return self._get_observation() def step(self, action: int): + """ + Logic for a single step (incrementing one candle in time) + by the agent + :param: action: int = the action type that the agent plans + to take for the current step. + :returns: + observation = current state of environment + step_reward = the reward from `calculate_reward()` + _done = if the agent "died" or if the candles finished + info = dict passed back to openai gym lib + """ self._done = False self._current_tick += 1 diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 200b7d138..6474483c6 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -43,6 +43,10 @@ class BaseEnvironment(gym.Env): def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): + """ + Resets the environment when the agent fails (in our case, if the drawdown + exceeds the user set max_training_drawdown_pct) + """ self.df = df self.signal_features = self.df self.prices = prices @@ -133,13 +137,18 @@ class BaseEnvironment(gym.Env): return features_and_state def get_trade_duration(self): + """ + Get the trade duration if the agent is in a trade + """ if self._last_trade_tick is None: return 0 else: return self._current_tick - self._last_trade_tick def get_unrealized_profit(self): - + """ + Get the unrealized profit if the agent is in a trade + """ if self._last_trade_tick is None: return 0. @@ -158,7 +167,6 @@ class BaseEnvironment(gym.Env): @abstractmethod def is_tradesignal(self, action: int): - # trade signal """ Determine if the signal is a trade signal. 
This is unique to the actions in the environment, and therefore must be @@ -167,7 +175,6 @@ class BaseEnvironment(gym.Env): return def _is_valid(self, action: int): - # trade signal """ Determine if the signal is valid.This is unique to the actions in the environment, and therefore must be @@ -191,8 +198,13 @@ class BaseEnvironment(gym.Env): @abstractmethod def calculate_reward(self, action): """ - Reward is created by BaseReinforcementLearningModel and can - be inherited/edited by the user made ReinforcementLearner file. + An example reward function. This is the one function that users will likely + wish to inject their own creativity into. + :params: + action: int = The action made by the agent for the current candle. + :returns: + float = the reward to give to the agent for current step (used for optimization + of weights in NN) """ def _update_unrealized_total_profit(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index d10bf4dc3..c82fd1ea9 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -2,7 +2,7 @@ import logging from abc import abstractmethod from datetime import datetime, timezone from pathlib import Path -from typing import Any, Callable, Dict, Tuple +from typing import Any, Callable, Dict, Tuple, Type, Union import gym import numpy as np @@ -19,8 +19,9 @@ from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv -from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions +from freqtrade.freqai.RL.BaseEnvironment import Positions from freqtrade.persistence import Trade +from stable_baselines3.common.vec_env import SubprocVecEnv logger = logging.getLogger(__name__) @@ -33,15 +34,15 @@ SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO'] class BaseReinforcementLearningModel(IFreqaiModel): """ - User created Reinforcement Learning Model prediction model. + User created Reinforcement Learning Model prediction class """ def __init__(self, **kwargs): super().__init__(config=kwargs['config']) th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] - self.train_env: BaseEnvironment = None - self.eval_env: BaseEnvironment = None + self.train_env: Union[SubprocVecEnv, gym.Env] = None + self.eval_env: Union[SubprocVecEnv, gym.Env] = None self.eval_callback: EvalCallback = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] @@ -126,6 +127,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk: FreqaiDataKitchen): """ User can override this if they are using a custom MyRLEnv + :params: + data_dictionary: dict = common data dictionary containing train and test + features/labels/weights. 
+ prices_train/test: DataFrame = dataframe comprised of the prices to be used in the + environment during training + or testing + dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -148,15 +156,24 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ return - def get_state_info(self, pair: str): + def get_state_info(self, pair: str) -> Tuple[float, float, int]: + """ + State info during dry/live/backtesting which is fed back + into the model. + :param: + pair: str = COIN/STAKE to get the environment information for + :returns: + market_side: float = representing short, long, or neutral for + pair + trade_duration: int = the number of candles that the trade has + been open for + """ open_trades = Trade.get_trades_proxy(is_open=True) market_side = 0.5 current_profit: float = 0 trade_duration = 0 for trade in open_trades: if trade.pair == pair: - # FIXME: get_rate and trade_udration shouldn't work with backtesting, - # we need to use candle dates and prices to compute that. if self.strategy.dp._exchange is None: # type: ignore logger.error('No exchange available.') else: @@ -172,11 +189,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): market_side = 0 current_profit = (openrate - current_value) / openrate - # total_profit = 0 - # closed_trades = Trade.get_trades_proxy(pair=pair, is_open=False) - # for trade in closed_trades: - # total_profit += trade.close_profit - return market_side, current_profit, int(trade_duration) def predict( @@ -209,7 +221,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): def rl_model_predict(self, dataframe: DataFrame, dk: FreqaiDataKitchen, model: Any) -> DataFrame: - + """ + A helper function to make predictions in the Reinforcement learning module. + :params: + dataframe: DataFrame = the dataframe of features to make the predictions on + dk: FreqaiDatakitchen = data kitchen for the current pair + model: Any = the trained model used to inference the features. + """ output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): @@ -274,26 +292,37 @@ class BaseReinforcementLearningModel(IFreqaiModel): sets a custom reward based on profit and trade duration. """ - def calculate_reward(self, action): - + def calculate_reward(self, action: int) -> float: + """ + An example reward function. This is the one function that users will likely + wish to inject their own creativity into. + :params: + action: int = The action made by the agent for the current candle. + :returns: + float = the reward to give to the agent for current step (used for optimization + of weights in NN) + """ # first, penalize if the action is not valid if not self._is_valid(action): return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) - factor = 100 + factor = 100. 
# reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: + if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral): return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + if self._last_trade_tick: + trade_duration = self._current_tick - self._last_trade_tick + else: + trade_duration = 0 if trade_duration <= max_trade_duration: factor *= 1.5 @@ -301,8 +330,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): factor *= 0.5 # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and \ - action == Actions.Neutral.value: + if (self._position in (Positions.Short, Positions.Long) and + action == Actions.Neutral.value): return -1 * trade_duration / max_trade_duration # close long @@ -320,7 +349,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): return 0. -def make_env(MyRLEnv: BaseEnvironment, env_id: str, rank: int, +def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, config: Dict[str, Any] = {}) -> Callable: diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 2e5c9f97b..00afd61d4 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -19,7 +19,15 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): - + """ + User customizable fit method + :params: + data_dictionary: dict = common data dictionary containing all train/test + features/labels/weights. + dk: FreqaiDatakitchen = data kitchen for current pair. + :returns: + model: Any = trained model to be used for inference in dry/live/backtesting + """ train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) @@ -59,7 +67,15 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ def calculate_reward(self, action): - + """ + An example reward function. This is the one function that users will likely + wish to inject their own creativity into. + :params: + action: int = The action made by the agent for the current candle. 
+ :returns: + float = the reward to give to the agent for current step (used for optimization + of weights in NN) + """ # first, penalize if the action is not valid if not self._is_valid(action): return -2 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index c14511921..5b2ea2ef5 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -6,7 +6,7 @@ from typing import Any, Dict # , Tuple import torch as th from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv - +from pandas import DataFrame from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, make_env) @@ -55,11 +55,18 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): return model - def set_train_and_eval_environments(self, data_dictionary, prices_train, prices_test, dk): + def set_train_and_eval_environments(self, data_dictionary: Dict[str, Any], + prices_train: DataFrame, prices_test: DataFrame, + dk: FreqaiDataKitchen): """ - If user has particular environment configuration needs, they can do that by - overriding this function. In the present case, the user wants to setup training - environments for multiple workers. + User can override this if they are using a custom MyRLEnv + :params: + data_dictionary: dict = common data dictionary containing train and test + features/labels/weights. + prices_train/test: DataFrame = dataframe comprised of the prices to be used in + the environment during training + or testing + dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -79,4 +86,4 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): in range(num_cpu)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), - best_model_save_path=dk.data_path) + best_model_save_path=str(dk.data_path)) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index f0af90f18..1bc30a670 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -244,7 +244,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] assert len(model_folders) == num_files - + Trade.use_db = True shutil.rmtree(Path(freqai.dk.full_path)) @@ -297,7 +297,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): assert len(model_folders) == 6 - # without deleting the exiting folder structure, re-run + # without deleting the existing folder structure, re-run freqai_conf.update({"timerange": "20180120-20180130"}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) From 647200e8a72ad20c4eb7d890486fb9c869ff0b3f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 23 Sep 2022 19:30:56 +0200 Subject: [PATCH 087/232] isort --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- .../prediction_models/ReinforcementLearner_multiproc.py | 3 ++- tests/freqai/test_freqai_interface.py | 7 ++++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py 
b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index c82fd1ea9..70b3e58ef 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -14,6 +14,7 @@ from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed +from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -21,7 +22,6 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import Positions from freqtrade.persistence import Trade -from stable_baselines3.common.vec_env import SubprocVecEnv logger = logging.getLogger(__name__) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 5b2ea2ef5..0e6449dcd 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -4,9 +4,10 @@ from typing import Any, Dict # , Tuple # import numpy.typing as npt import torch as th +from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv -from pandas import DataFrame + from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, make_env) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 1bc30a670..3a200e0af 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -4,15 +4,16 @@ from pathlib import Path from unittest.mock import MagicMock import pytest -from freqtrade.enums import RunMode + from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider +from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen +from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange +from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import get_patched_exchange, log_has_re from tests.freqai.conftest import get_patched_freqai_strategy -from freqtrade.persistence import Trade -from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange def is_arm() -> bool: From caa47a2f47f6c6ce936a0762fee5bbaa39fc492d Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Wed, 28 Sep 2022 03:06:05 +0000 Subject: [PATCH 088/232] close subproc env on shutdown --- freqtrade/freqai/freqai_interface.py | 9 +++++++++ .../ReinforcementLearner_multiproc.py | 14 +++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 1a847a25e..f8ca34ddb 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -158,6 +158,13 @@ class IFreqaiModel(ABC): self.model = None self.dk = None + def _on_stop(self): + """ + Callback for Subclasses to override to include logic for shutting down resources + when SIGINT is sent. 
+ """ + return + def shutdown(self): """ Cleans up threads on Shutdown, set stop event. Join threads to wait @@ -166,6 +173,8 @@ class IFreqaiModel(ABC): logger.info("Stopping FreqAI") self._stop_event.set() + self._on_stop() + logger.info("Waiting on Training iteration") for _thread in self._threads: _thread.join() diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 0e6449dcd..efdd4883c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -73,7 +73,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"]["thread_count"]) + num_cpu = int(self.freqai_info["rl_config"].get("cpu_count", 2)) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i @@ -88,3 +88,15 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) + + + def _on_stop(self): + """ + Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. + """ + + if hasattr(self, "train_env") and self.train_env: + self.train_env.close() + + if hasattr(self, "eval_env") and self.eval_env: + self.eval_env.close() \ No newline at end of file From 9e36b0d2ea89a1bbbcf6aab411727a9ccedb4c32 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 27 Sep 2022 22:02:33 -0600 Subject: [PATCH 089/232] fix formatting --- .../freqai/prediction_models/ReinforcementLearner_multiproc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index efdd4883c..034c752e7 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -89,7 +89,6 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - def _on_stop(self): """ Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. @@ -99,4 +98,4 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.train_env.close() if hasattr(self, "eval_env") and self.eval_env: - self.eval_env.close() \ No newline at end of file + self.eval_env.close() From 099137adaca3d81f5e5cada2cb70ea159ee6ffa1 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 27 Sep 2022 22:35:15 -0600 Subject: [PATCH 090/232] remove hasattr calls --- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 034c752e7..d01c409c3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -94,8 +94,8 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): Hook called on bot shutdown. 
Close SubprocVecEnv subprocesses for clean shutdown. """ - if hasattr(self, "train_env") and self.train_env: + if self.train_env: self.train_env.close() - if hasattr(self, "eval_env") and self.eval_env: + if self.eval_env: self.eval_env.close() From 83343dc2f11988cc2ee384ebdcba2731d156e26d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 29 Sep 2022 00:10:18 +0200 Subject: [PATCH 091/232] control number of threads, update doc --- docs/freqai.md | 2 +- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 +++- freqtrade/freqai/data_kitchen.py | 6 +++++- freqtrade/freqai/freqai_interface.py | 2 ++ .../prediction_models/ReinforcementLearner_multiproc.py | 5 ++--- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index 938fb70f4..20562aadc 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -131,7 +131,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | | *Reinforcement Learning Parameters** | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
| `train_cycles` | Training time steps will be set based on the `train_cycles * number of training data points`.
**Datatype:** Integer. -| `thread_count` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int. +| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int.
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 70b3e58ef..8785192f4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -39,7 +39,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) + self.max_threads = max(self.freqai_info['rl_config'].get( + 'cpu_count', 0), int(self.max_system_threads / 2)) + th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None self.eval_env: Union[SubprocVecEnv, gym.Env] = None diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 005005368..9f84e63b7 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,6 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd +import psutil from pandas import DataFrame from scipy import stats from sklearn import linear_model @@ -95,7 +96,10 @@ class FreqaiDataKitchen: ) self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {}) - self.thread_count = self.freqai_config.get("data_kitchen_thread_count", -1) + if not self.freqai_config.get("data_kitchen_thread_count", 0): + self.thread_count = int(psutil.cpu_count() * 2 - 2) + else: + self.thread_count = self.freqai_config["data_kitchen_thread_count"] self.train_dates: DataFrame = pd.DataFrame() self.unique_classes: Dict[str, list] = {} self.unique_class_list: list = [] diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index f8ca34ddb..5fe3c318c 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -11,6 +11,7 @@ from typing import Any, Dict, List, Optional, Tuple import numpy as np import pandas as pd +import psutil from numpy.typing import NDArray from pandas import DataFrame @@ -96,6 +97,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() self.strategy: Optional[IStrategy] = None + self.max_system_threads = int(psutil.cpu_count() * 2 - 2) def __getstate__(self): """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index d01c409c3..a644c0c04 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -73,18 +73,17 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"].get("cpu_count", 2)) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i - in range(num_cpu)]) + in range(self.max_threads)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i - in range(num_cpu)]) + in range(self.max_threads)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, 
render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) From dcf6ebe273729bf9634c44804f016941805d68d9 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 00:37:03 +0200 Subject: [PATCH 092/232] Update BaseReinforcementLearningModel.py --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 8785192f4..33568fa0b 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -39,7 +39,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - self.max_threads = max(self.freqai_info['rl_config'].get( + self.max_threads = min(self.freqai_info['rl_config'].get( 'cpu_count', 0), int(self.max_system_threads / 2)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] From 555cc4263003fc57599896f912beba66a46376b1 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:00:09 +0200 Subject: [PATCH 093/232] Ensure 1 thread is available (for testing purposes) --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 33568fa0b..705c35297 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -40,7 +40,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) self.max_threads = min(self.freqai_info['rl_config'].get( - 'cpu_count', 0), int(self.max_system_threads / 2)) + 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None From 7ef56e30296ad3a32fb88a01feb58b5b9b236944 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:01:22 +0200 Subject: [PATCH 094/232] Ensure at least 1 thread is available --- freqtrade/freqai/freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 5fe3c318c..44535f191 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -97,7 +97,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() self.strategy: Optional[IStrategy] = None - self.max_system_threads = int(psutil.cpu_count() * 2 - 2) + self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) def __getstate__(self): """ From 6e74d46660ac47aa44fc26a5c1c439d88d96576e Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:02:00 +0200 Subject: [PATCH 095/232] Ensure 1 thread available --- freqtrade/freqai/data_kitchen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 9f84e63b7..73717abce 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -97,7 +97,7 @@ class FreqaiDataKitchen: self.data['extra_returns_per_train'] = 
self.freqai_config.get('extra_returns_per_train', {}) if not self.freqai_config.get("data_kitchen_thread_count", 0): - self.thread_count = int(psutil.cpu_count() * 2 - 2) + self.thread_count = max(int(psutil.cpu_count() * 2 - 2), 1) else: self.thread_count = self.freqai_config["data_kitchen_thread_count"] self.train_dates: DataFrame = pd.DataFrame() From ab9d781b06c44ff331e7d094193963c0960d7dfb Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 1 Oct 2022 17:50:05 +0200 Subject: [PATCH 096/232] add reinforcement learning page to docs --- docs/freqai-parameter-table.md | 10 ++ docs/freqai-reinforcement-learning.md | 200 ++++++++++++++++++++++++++ mkdocs.yml | 1 + 3 files changed, 211 insertions(+) create mode 100644 docs/freqai-reinforcement-learning.md diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 8e19226ba..e80ab6fb6 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -46,6 +46,16 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `n_estimators` | The number of boosted trees to fit in regression.
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during regression.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float.
+| | **Reinforcement Learning Parameters**
+| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
+| `train_cycles` | Training time steps will be set based on the `train_cycles * number of training data points`.
**Datatype:** Integer. +| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int.
+| `max_trade_duration_candles` | Guides the agent training to keep trades below the desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`.
**Datatype:** int.
+| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string.
+| `policy_type` | One of the available policy types from stable_baselines3.
**Datatype:** string. +| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool.
+| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on whether `ReinforcementLearning_multiproc` is selected or not).
**Datatype:** int. +| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | | **Extraneous parameters** | `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. | `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: 2. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md new file mode 100644 index 000000000..0aef015ed --- /dev/null +++ b/docs/freqai-reinforcement-learning.md @@ -0,0 +1,200 @@ +# Reinforcement Learning + +Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: + +```bash +freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json +``` + +where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner`. The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor: + +```python + def populate_any_indicators( + self, pair, df, tf, informative=None, set_generalized_indicators=False + ): + + coin = pair.split('/')[0] + + if informative is None: + informative = self.dp.get_pair_dataframe(pair, tf) + + # first loop is automatically duplicating indicators for time periods + for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: + + t = int(t) + informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] + + indicators = [col for col in informative if col.startswith("%")] + # This loop duplicates and shifts all indicators to add a sense of recency to data + for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1): + if n == 0: + continue + informative_shift = informative[indicators].shift(n) + informative_shift = informative_shift.add_suffix("_shift-" + str(n)) + informative = pd.concat((informative, informative_shift), axis=1) + + df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True) + skip_columns = [ + (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"] + ] + df = df.drop(columns=skip_columns) + + # Add generalized indicators here (because in live, it will call this + # function to populate indicators during training). Notice how we ensure not to + # add them multiple times + if set_generalized_indicators: + + # For RL, there are no direct targets to set. This is filler (neutral) + # until the agent sends an action. + df["&-action"] = 0 + + return df +``` + +Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environent: + +```python + # The following features are necessary for RL models + informative[f"%-{coin}raw_close"] = informative["close"] + informative[f"%-{coin}raw_open"] = informative["open"] + informative[f"%-{coin}raw_high"] = informative["high"] + informative[f"%-{coin}raw_low"] = informative["low"] +``` + +Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. 
In the present example, the user set the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. + +After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: + +```python + def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + + enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1] + + if enter_long_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] + ] = (1, "long") + + enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3] + + if enter_short_conditions: + df.loc[ + reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] + ] = (1, "short") + + return df + + def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: + exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2] + if exit_long_conditions: + df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 + + exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4] + if exit_short_conditions: + df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 + + return df +``` + +It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. + +## Configuring the Reinforcement Learner + +In order to configure the `Reinforcement Learner` the following dictionary to their `freqai` config: + +```json + "rl_config": { + "train_cycles": 25, + "max_trade_duration_candles": 300, + "max_training_drawdown_pct": 0.02, + "cpu_count": 8, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "continual_learning": false, + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.025 + } + } +``` + +Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environemtn to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). + +## Creating the reward + +As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. 
Below we show examples of how these rewards are all calculated: + +```python + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. + """ + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + pnl = self.get_unrealized_profit() + rew = np.sign(pnl) * (pnl + 1) + factor = 100 + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(rew * factor) + return 0. +``` + +### Creating a custom agent + +Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py` + +### Using Tensorboard + +Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: + +```bash +pip3 install tensorboard +``` + +Next, the user can activate Tensorboard with the following command: + +```bash +cd freqtrade +tensorboard --logdir user_data/models/unique-id +``` + +where `unique-id` is the `identifier` set in the `freqai` configuration file. 
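TensorBoard can also be launched from Python instead of the shell. The following is a minimal sketch, assuming the `tensorboard` package is installed and that `user_data/models/unique-id` is the same identifier directory referenced above:

```python
# Minimal sketch, assuming the `tensorboard` pip package is available.
# It points TensorBoard at the same identifier directory used by FreqAI.
from tensorboard import program

tb = program.TensorBoard()
tb.configure(argv=[None, "--logdir", "user_data/models/unique-id"])
url = tb.launch()  # starts a background server and returns its URL
print(f"TensorBoard available at {url}")
```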
+ +![tensorboard](assets/tensorboard.png) \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 6477c1feb..81f2b7b0b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -29,6 +29,7 @@ nav: - Parameter table: freqai-parameter-table.md - Feature engineering: freqai-feature-engineering.md - Running FreqAI: freqai-running.md + - Reinforcement Learning: freqai-reinforcement-learning.md - Developer guide: freqai-developers.md - Short / Leverage: leverage.md - Utility Sub-commands: utils.md From cf882fa84eccd3ce3418451224c1621bf79ee689 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 1 Oct 2022 20:26:41 +0200 Subject: [PATCH 097/232] fix tests --- docs/freqai-reinforcement-learning.md | 2 +- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- tests/freqai/test_freqai_datakitchen.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 0aef015ed..742b2fb97 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -197,4 +197,4 @@ tensorboard --logdir user_data/models/unique-id where `unique-id` is the `identifier` set in the `freqai` configuration file. -![tensorboard](assets/tensorboard.png) \ No newline at end of file +![tensorboard](assets/tensorboard.png) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 705c35297..115ee59ce 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -213,7 +213,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk.data_dictionary["prediction_features"] = filtered_dataframe # optional additional data cleaning/analysis - self.data_cleaning_predict(dk, filtered_dataframe) + self.data_cleaning_predict(dk) pred_df = self.rl_model_predict( dk.data_dictionary["prediction_features"], dk, self.model) diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 4a0eadeb5..023193818 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -71,7 +71,7 @@ def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog): freqai = make_data_dictionary(mocker, freqai_conf) # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1}) freqai.dk.use_DBSCAN_to_remove_outliers(predict=False) - assert log_has_re(r"DBSCAN found eps of 2\.3\d\.", caplog) + assert log_has_re(r"DBSCAN found eps of 1.75", caplog) def test_compute_distances(mocker, freqai_conf): @@ -86,7 +86,7 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1}) freqai.dk.use_SVM_to_remove_outliers(predict=False) assert log_has_re( - "SVM detected 8.66%", + "SVM detected 7.36%", caplog, ) From 292d72d59325b35c047569f88892364bc2e9c027 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 3 Oct 2022 18:42:20 +0200 Subject: [PATCH 098/232] automatically handle model_save_type for user --- docs/freqai-parameter-table.md | 2 +- docs/freqai-reinforcement-learning.md | 1 - freqtrade/freqai/data_drawer.py | 20 ++++++++++++-------- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index e80ab6fb6..2fa54b590 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -53,7 +53,7 @@ 
Mandatory parameters are marked as **Required** and have to be set in one of the | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int.
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. -| `continual_learning` | If true, the agent will start new trainings from the model selected during the previous training. If false, a new agent is trained from scratch for each training.
**Datatype:** Bool. +| `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training.
**Datatype:** float.
Default: 0.8
| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on whether `ReinforcementLearning_multiproc` is selected or not).
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | | **Extraneous parameters** diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 742b2fb97..87a4a7646 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -118,7 +118,6 @@ In order to configure the `Reinforcement Learner` the following dictionary to th "cpu_count": 8, "model_type": "PPO", "policy_type": "MlpPolicy", - "continual_learning": false, "model_reward_parameters": { "rr": 1, "profit_aim": 0.025 diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 9bbcdad8b..143b4c172 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -92,6 +92,12 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) + if 'rl_config' in self.freqai_info: + self.model_type = 'stable_baselines' + logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' + ' to save models.') + else: + self.model_type = self.freqai_info.get('model_save_type', 'joblib') def load_drawer_from_disk(self): """ @@ -414,12 +420,11 @@ class FreqaiDataDrawer: save_path = Path(dk.data_path) # Save the trained model - model_type = self.freqai_info.get('model_save_type', 'joblib') - if model_type == 'joblib': + if self.model_type == 'joblib': dump(model, save_path / f"{dk.model_filename}_model.joblib") - elif model_type == 'keras': + elif self.model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif 'stable_baselines' in model_type: + elif 'stable_baselines' in self.model_type: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -496,16 +501,15 @@ class FreqaiDataDrawer: dk.data_path / f"{dk.model_filename}_trained_df.pkl" ) - model_type = self.freqai_info.get('model_save_type', 'joblib') # try to access model in memory instead of loading object from disk to save time if dk.live and coin in self.model_dictionary and not self.limit_ram_use: model = self.model_dictionary[coin] - elif model_type == 'joblib': + elif self.model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") - elif model_type == 'keras': + elif self.model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif model_type == 'stable_baselines': + elif self.model_type == 'stable_baselines': mod = __import__('stable_baselines3', fromlist=[ self.freqai_info['rl_config']['model_type']]) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) From 8c7f478724fcc1f897c20b727fa64bf304bc350e Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Wed, 5 Oct 2022 10:59:33 +0200 Subject: [PATCH 099/232] Update requirements-freqai.txt --- requirements-freqai.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index dae13ced0..0affbc238 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -11,5 +11,4 @@ torch==1.12.1 stable-baselines3==1.6.0 gym==0.21.0 tensorboard==2.9.1 -optuna==2.10.1 -sb3-contrib==1.6.0 \ No newline at end of file +sb3-contrib==1.6.0 From 936ca244821960ed0b7fc8ae92588f9819aaffd0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 15:58:54 +0200 Subject: [PATCH 100/232] separate RL install from general FAI install, update docs --- docs/freqai-reinforcement-learning.md | 11 +++++++---- 
.../freqai/prediction_models/ReinforcementLearner.py | 6 ++---- requirements-freqai-rl.txt | 8 ++++++++ requirements-freqai.txt | 10 ++++------ setup.sh | 9 ++++++++- tests/freqai/test_freqai_interface.py | 1 - 6 files changed, 29 insertions(+), 16 deletions(-) create mode 100644 requirements-freqai-rl.txt diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 87a4a7646..8a390ac34 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,5 +1,8 @@ # Reinforcement Learning +!!! Note + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. + Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: ```bash @@ -143,7 +146,7 @@ As users begin to modify the strategy and the prediction model, they will quickl if not self._is_valid(action): return -2 pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) + factor = 100 # reward agent for entering trades if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ @@ -166,12 +169,12 @@ As users begin to modify the strategy and the prediction model, they will quickl if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) return 0. ``` @@ -194,6 +197,6 @@ cd freqtrade tensorboard --logdir user_data/models/unique-id ``` -where `unique-id` is the `identifier` set in the `freqai` configuration file. +where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). 
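As a purely illustrative aside on the reward change above (returning `float(pnl * factor)` rather than scaling `np.sign(pnl) * (pnl + 1)`), the sketch below compares the two terms for an example winning close; the numbers are assumptions chosen only to show the difference in scale:

```python
# Illustrative only: compare the removed reward term with the one kept in this patch,
# for an assumed winning close (pnl = 0.03) and the default factor of 100.
import numpy as np

pnl, factor = 0.03, 100
removed = float(np.sign(pnl) * (pnl + 1) * factor)  # about 103, dominated by the constant +1 offset
kept = float(pnl * factor)                          # about 3, proportional to the realized pnl
print(removed, kept)
```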
![tensorboard](assets/tensorboard.png) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 00afd61d4..48519c34c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -2,7 +2,6 @@ import logging from pathlib import Path from typing import Any, Dict -import numpy as np import torch as th from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -81,7 +80,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return -2 pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) factor = 100 # reward agent for entering trades @@ -109,12 +107,12 @@ class ReinforcementLearner(BaseReinforcementLearningModel): if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) return 0. diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt new file mode 100644 index 000000000..e29df34ac --- /dev/null +++ b/requirements-freqai-rl.txt @@ -0,0 +1,8 @@ +# Include all requirements to run the bot. +-r requirements-freqai.txt + +# Required for freqai-rl +torch==1.12.1 +stable-baselines3==1.6.1 +gym==0.26.2 +sb3-contrib==1.6.1 diff --git a/requirements-freqai.txt b/requirements-freqai.txt index dae13ced0..d4a741c29 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -1,5 +1,5 @@ # Include all requirements to run the bot. --r requirements-hyperopt.txt +-r requirements.txt # Required for freqai scikit-learn==1.1.2 @@ -8,8 +8,6 @@ catboost==1.1; platform_machine != 'aarch64' lightgbm==3.3.2 xgboost==1.6.2 torch==1.12.1 -stable-baselines3==1.6.0 -gym==0.21.0 -tensorboard==2.9.1 -optuna==2.10.1 -sb3-contrib==1.6.0 \ No newline at end of file +stable-baselines3==1.6.1 +gym==0.26.2 +sb3-contrib==1.6.1 diff --git a/setup.sh b/setup.sh index 1a4a285a3..f57e820af 100755 --- a/setup.sh +++ b/setup.sh @@ -78,14 +78,21 @@ function updateenv() { fi REQUIREMENTS_FREQAI="" + REQUIREMENTS_FREQAI_RL="" read -p "Do you want to install dependencies for freqai [y/N]? " dev=$REPLY if [[ $REPLY =~ ^[Yy]$ ]] then REQUIREMENTS_FREQAI="-r requirements-freqai.txt" + read -p "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]? " + dev=$REPLY + if [[ $REPLY =~ ^[Yy]$ ]] + then + REQUIREMENTS_FREQAI="-r requirements-freqai-rl.txt" + fi fi - ${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI} + ${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI} ${REQUIREMENTS_FREQAI_RL} if [ $? 
-ne 0 ]; then echo "Failed installing dependencies" exit 1 diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 1f05f881e..b3e61b590 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -8,7 +8,6 @@ import pytest from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RunMode -from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange from freqtrade.optimize.backtesting import Backtesting From b5dd92f85ae7f9a0a3eac96541bdf4707136ec4f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 16:25:24 +0200 Subject: [PATCH 101/232] remove RL reqs from general FAI reqs --- requirements-freqai.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/requirements-freqai.txt b/requirements-freqai.txt index d4a741c29..cf0d2eb07 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -7,7 +7,3 @@ joblib==1.2.0 catboost==1.1; platform_machine != 'aarch64' lightgbm==3.3.2 xgboost==1.6.2 -torch==1.12.1 -stable-baselines3==1.6.1 -gym==0.26.2 -sb3-contrib==1.6.1 From ab4705efd23f89326f59ede44bcb4192850b9d67 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 16:39:38 +0200 Subject: [PATCH 102/232] provide background and goals for RL in doc --- docs/freqai-reinforcement-learning.md | 28 ++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 8a390ac34..a59c5b9d3 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -3,6 +3,28 @@ !!! Note Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. + +## Background and terminology + +### What is RL and why does FreqAI need it? + +Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. + +A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live. *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* + +Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. 
However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors. + +### The RL interface + +With the current framework, we aim to expose the training environment to the user via the common "prediction model" file (i.e. CatboostClassifier, LightGBMRegressor, etc.). Users inherit our base environment in this file, which allows them to override as much or as little of the environment as they wish. + +We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. + +The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework (albeit with some basic assistance from core-dev). For the environment, it is inheriting from `gym.env` which means that a user would need to write an entirely new environment if they wish to switch to a different library. + + +## Running Reinforcement Learning + Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: ```bash @@ -178,10 +200,6 @@ As users begin to modify the strategy and the prediction model, they will quickl return 0. ``` -### Creating a custom agent - -Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py` - ### Using Tensorboard Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: @@ -199,4 +217,4 @@ tensorboard --logdir user_data/models/unique-id where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). 
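The integer actions described in the background section above (0 = neutral, 1 = long entry, 2 = long exit, 3 = short entry, 4 = short exit) can be kept readable on the strategy side with a small helper. The names below are illustrative assumptions, not part of the FreqAI API:

```python
# Illustrative sketch only: these names are an assumption, not part of FreqAI's API.
# The integer values mirror the 5-action environment described in this page.
from enum import IntEnum


class AgentAction(IntEnum):
    NEUTRAL = 0
    LONG_ENTER = 1
    LONG_EXIT = 2
    SHORT_ENTER = 3
    SHORT_EXIT = 4


# e.g. in populate_entry_trend():
#   enter_long = [df["do_predict"] == 1, df["&-action"] == AgentAction.LONG_ENTER]
```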
-![tensorboard](assets/tensorboard.png) +![tensorboard](assets/tensorboard.jpg) From 17fb7f7a3b35319e7717e9ebdcfa79bde0a64fe9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 16:46:02 +0200 Subject: [PATCH 103/232] gym needs 0.21 to match stable_baselines3 --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index e29df34ac..b6bd7ef15 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -4,5 +4,5 @@ # Required for freqai-rl torch==1.12.1 stable-baselines3==1.6.1 -gym==0.26.2 +gym==0.21 sb3-contrib==1.6.1 From cf10a76a2a6796dde396afd53c6ab984a24e58e2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 17:06:18 +0200 Subject: [PATCH 104/232] bring back Trades.use_db = True --- tests/freqai/test_freqai_interface.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index b3e61b590..65a79a580 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -246,6 +246,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] assert len(model_folders) == num_files + Trade.use_db = True Backtesting.cleanup() shutil.rmtree(Path(freqai.dk.full_path)) From 017e476f49b48938c5a9b5415417d0e0f6122c4e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 17:20:40 +0200 Subject: [PATCH 105/232] add extras to setup.py for RL --- setup.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b3693c9f9..304567bcc 100644 --- a/setup.py +++ b/setup.py @@ -15,6 +15,14 @@ freqai = [ 'scikit-learn', 'catboost; platform_machine != "aarch64"', 'lightgbm', + 'xgboost' +] + +freqai_rl = [ + 'torch', + 'stable-baselines3', + 'gym==0.21', + 'sb3-contrib' ] develop = [ @@ -36,7 +44,7 @@ jupyter = [ 'nbconvert', ] -all_extra = plot + develop + jupyter + hyperopt + freqai +all_extra = plot + develop + jupyter + hyperopt + freqai + freqai_rl setup( tests_require=[ @@ -90,6 +98,7 @@ setup( 'jupyter': jupyter, 'hyperopt': hyperopt, 'freqai': freqai, + 'freqai_rl': freqai_rl, 'all': all_extra, }, ) From 488739424d07b5902569ef53e9e8a09a9a301718 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 20:55:50 +0200 Subject: [PATCH 106/232] fix reward inconsistency in template --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 115ee59ce..e89320668 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -309,7 +309,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -2 pnl = self.get_unrealized_profit() - rew = np.sign(pnl) * (pnl + 1) factor = 100. 
# reward agent for entering trades @@ -340,13 +339,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(rew * factor) + return float(pnl * factor) return 0. From e5204101d9a1bc938d4b9312cdc0ddd9fd35d803 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 5 Oct 2022 21:34:10 +0200 Subject: [PATCH 107/232] add tensorboard back to reqs to keep default integration working (and for docker) --- docs/freqai-reinforcement-learning.md | 8 +------- requirements-freqai-rl.txt | 1 + 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index a59c5b9d3..8b775e046 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -202,13 +202,7 @@ As users begin to modify the strategy and the prediction model, they will quickl ### Using Tensorboard -Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer: - -```bash -pip3 install tensorboard -``` - -Next, the user can activate Tensorboard with the following command: +Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. 
Tensorboard is activated via the following command: ```bash cd freqtrade diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index b6bd7ef15..22e077241 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -6,3 +6,4 @@ torch==1.12.1 stable-baselines3==1.6.1 gym==0.21 sb3-contrib==1.6.1 +tensorboard==2.10.1 From b9f1872d518349c1686b2db2e1ebc9c5ccd7fcc7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 6 Oct 2022 08:28:15 +0200 Subject: [PATCH 108/232] Install RL dependencies as dev dependency --- .gitignore | 1 - requirements-dev.txt | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 2d2d526d9..e400c01f5 100644 --- a/.gitignore +++ b/.gitignore @@ -113,4 +113,3 @@ target/ !config_examples/config_full.example.json !config_examples/config_kraken.example.json !config_examples/config_freqai.example.json -!config_examples/config_freqai-rl.example.json diff --git a/requirements-dev.txt b/requirements-dev.txt index ebe278e10..dccd5baba 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,6 +3,7 @@ -r requirements-plot.txt -r requirements-hyperopt.txt -r requirements-freqai.txt +-r requirements-freqai-rl.txt -r docs/requirements-docs.txt coveralls==3.3.1 From 3e258e000ebed48a10c90c40b060af69e5cb3470 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 7 Oct 2022 07:05:56 +0200 Subject: [PATCH 109/232] Don't set use_db without resetting it --- tests/freqai/test_freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 65a79a580..bd7c62c5f 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -192,12 +192,12 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) freqai_conf['runmode'] = RunMode.BACKTEST - Trade.use_db = False if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") if is_mac(): pytest.skip("Reinforcement learning module not available on intel based Mac OS") + Trade.use_db = False freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180120-20180130"}) From 8d7adfabe97e7e7db23df2108e181452fd9f14ac Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 8 Oct 2022 12:10:38 +0200 Subject: [PATCH 110/232] clean RL tests to avoid dir pollution and increase speed --- .../RL/BaseReinforcementLearningModel.py | 12 ++++++ .../prediction_models/ReinforcementLearner.py | 2 +- .../ReinforcementLearner_multiproc.py | 13 +----- tests/freqai/conftest.py | 24 +++++++++++ tests/freqai/test_freqai_interface.py | 43 ++----------------- .../ReinforcementLearner_test_4ac.py | 2 +- 6 files changed, 43 insertions(+), 53 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index e89320668..64af31c45 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -63,6 +63,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() + self.net_arch = self.rl_config.get('net_arch', [128, 128]) def unset_outlier_removal(self): """ @@ -287,6 +288,17 @@ class 
BaseReinforcementLearningModel(IFreqaiModel): return model + def _on_stop(self): + """ + Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. + """ + + if self.train_env: + self.train_env.close() + + if self.eval_env: + self.eval_env.close() + # Nested class which can be overridden by user to customize further class MyRLEnv(Base5ActionRLEnv): """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 48519c34c..4bf990172 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -31,7 +31,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[128, 128]) + net_arch=self.net_arch) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index a644c0c04..41345b967 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -28,7 +28,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): # model arch policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[128, 128]) + net_arch=self.net_arch) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, @@ -87,14 +87,3 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - - def _on_stop(self): - """ - Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. 
- """ - - if self.train_env: - self.train_env.close() - - if self.eval_env: - self.eval_env.close() diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 026b45afc..7f4897439 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -58,6 +58,30 @@ def freqai_conf(default_conf, tmpdir): return freqaiconf +def make_rl_config(conf): + conf.update({"strategy": "freqai_rl_test_strat"}) + conf["freqai"].update({"model_training_parameters": { + "learning_rate": 0.00025, + "gamma": 0.9, + "verbose": 1 + }}) + conf["freqai"]["rl_config"] = { + "train_cycles": 1, + "thread_count": 2, + "max_trade_duration_candles": 300, + "model_type": "PPO", + "policy_type": "MlpPolicy", + "max_training_drawdown_pct": 0.5, + "net_arch": [32, 32], + "model_reward_parameters": { + "rr": 1, + "profit_aim": 0.02, + "win_reward_factor": 2 + }} + + return conf + + def get_patched_data_kitchen(mocker, freqaiconf): dk = FreqaiDataKitchen(freqaiconf) return dk diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index bd7c62c5f..40a573547 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -14,7 +14,7 @@ from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import get_patched_exchange, log_has_re -from tests.freqai.conftest import get_patched_freqai_strategy +from tests.freqai.conftest import get_patched_freqai_strategy, make_rl_config def is_arm() -> bool: @@ -49,25 +49,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): if 'ReinforcementLearner' in model: model_save_ext = 'zip' - freqai_conf.update({"strategy": "freqai_rl_test_strat"}) - freqai_conf["freqai"].update({"model_training_parameters": { - "learning_rate": 0.00025, - "gamma": 0.9, - "verbose": 1 - }}) - freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) - freqai_conf["freqai"]["rl_config"] = { - "train_cycles": 1, - "thread_count": 2, - "max_trade_duration_candles": 300, - "model_type": "PPO", - "policy_type": "MlpPolicy", - "max_training_drawdown_pct": 0.5, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.02, - "win_reward_factor": 2 - }} + freqai_conf = make_rl_config(freqai_conf) if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") @@ -79,6 +61,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model): freqai = strategy.freqai freqai.live = True freqai.dk = FreqaiDataKitchen(freqai_conf) + freqai.dk.set_paths('ADA/BTC', 10000) timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) @@ -204,25 +187,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat): freqai_conf.update({"strategy": strat}) if 'ReinforcementLearner' in model: - - freqai_conf["freqai"].update({"model_training_parameters": { - "learning_rate": 0.00025, - "gamma": 0.9, - "verbose": 1 - }}) - freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'}) - freqai_conf["freqai"]["rl_config"] = { - "train_cycles": 1, - "thread_count": 2, - "max_trade_duration_candles": 300, - "model_type": "PPO", - "policy_type": "MlpPolicy", - "max_training_drawdown_pct": 0.5, - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.02, - "win_reward_factor": 2 - }} + freqai_conf = make_rl_config(freqai_conf) if 'test_4ac' in model: 
freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 9a8f800bd..13e5af02f 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -24,7 +24,7 @@ class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel): total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[128, 128]) + net_arch=[64, 64]) if dk.pair not in self.dd.model_dictionary or not self.continual_learning: model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, From a2843165e18e9a4ab46b686a825a1456ea45fbf8 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 30 Oct 2022 10:31:38 +0100 Subject: [PATCH 111/232] fix leftovers from merge --- freqtrade/freqai/data_drawer.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index a4d5b5d5c..5640dcb55 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -98,6 +98,13 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} self.metric_tracker: Dict[str, Dict[str, Dict[str, list]]] = {} + self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) + if 'rl_config' in self.freqai_info: + self.model_type = 'stable_baselines' + logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' + ' to save models.') + else: + self.model_type = self.freqai_info.get('model_save_type', 'joblib') def update_metric_tracker(self, metric: str, value: float, pair: str) -> None: """ @@ -124,13 +131,6 @@ class FreqaiDataDrawer: self.update_metric_tracker('cpu_load1min', load1 / cpus, pair) self.update_metric_tracker('cpu_load5min', load5 / cpus, pair) self.update_metric_tracker('cpu_load15min', load15 / cpus, pair) - self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) - if 'rl_config' in self.freqai_info: - self.model_type = 'stable_baselines' - logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' - ' to save models.') - else: - self.model_type = self.freqai_info.get('model_save_type', 'joblib') def load_drawer_from_disk(self): """ From a11d579bc2338fa3087fc5f7d079fa43314cecd5 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 22 Oct 2022 16:22:55 +0200 Subject: [PATCH 112/232] Verify order fills on "detail" timeframe --- freqtrade/optimize/backtesting.py | 69 ++++++++++++++++--------------- 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 4d98f1f5a..b3395a2c3 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -688,10 +688,11 @@ class Backtesting: trade.orders.append(order) return trade - def _get_exit_trade_entry(self, trade: LocalTrade, row: Tuple) -> Optional[LocalTrade]: + def _get_exit_trade_entry( + self, trade: LocalTrade, row: Tuple, is_first: bool) -> Optional[LocalTrade]: exit_candle_time: datetime = row[DATE_IDX].to_pydatetime() - if self.trading_mode == TradingMode.FUTURES: + if is_first and self.trading_mode == TradingMode.FUTURES: trade.funding_fees = self.exchange.calculate_funding_fees( self.futures_data[trade.pair], amount=trade.amount, @@ -700,32 
+701,7 @@ class Backtesting: close_date=exit_candle_time, ) - if self.timeframe_detail and trade.pair in self.detail_data: - exit_candle_end = exit_candle_time + timedelta(minutes=self.timeframe_min) - - detail_data = self.detail_data[trade.pair] - detail_data = detail_data.loc[ - (detail_data['date'] >= exit_candle_time) & - (detail_data['date'] < exit_candle_end) - ].copy() - if len(detail_data) == 0: - # Fall back to "regular" data if no detail data was found for this candle - return self._get_exit_trade_entry_for_candle(trade, row) - detail_data.loc[:, 'enter_long'] = row[LONG_IDX] - detail_data.loc[:, 'exit_long'] = row[ELONG_IDX] - detail_data.loc[:, 'enter_short'] = row[SHORT_IDX] - detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX] - detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX] - detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX] - for det_row in detail_data[HEADERS].values.tolist(): - res = self._get_exit_trade_entry_for_candle(trade, det_row) - if res: - return res - - return None - - else: - return self._get_exit_trade_entry_for_candle(trade, row) + return self._get_exit_trade_entry_for_candle(trade, row) def get_valid_price_and_stake( self, pair: str, row: Tuple, propose_rate: float, stake_amount: float, @@ -1070,7 +1046,7 @@ class Backtesting: def backtest_loop( self, row: Tuple, pair: str, current_time: datetime, end_date: datetime, - max_open_trades: int, open_trade_count_start: int) -> int: + max_open_trades: int, open_trade_count_start: int, is_first: bool = True) -> int: """ NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized. @@ -1088,9 +1064,11 @@ class Backtesting: # without positionstacking, we can only have one open trade per pair. # max_open_trades must be respected # don't open on the last row + # We only open trades on the initial candle. trade_dir = self.check_for_trade_entry(row) if ( (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0) + and is_first and self.trade_slot_available(max_open_trades, open_trade_count_start) and current_time != end_date and trade_dir is not None @@ -1116,7 +1094,7 @@ class Backtesting: # 4. Create exit orders (if any) if not trade.open_order_id: - self._get_exit_trade_entry(trade, row) # Place exit order if necessary + self._get_exit_trade_entry(trade, row, is_first) # Place exit order if necessary # 5. Process exit orders. 
order = trade.select_order(trade.exit_side, is_open=True) @@ -1167,7 +1145,6 @@ class Backtesting: self.progress.init_step(BacktestState.BACKTEST, int( (end_date - start_date) / timedelta(minutes=self.timeframe_min))) - # Loop timerange and get candle for each pair at that point in time while current_time <= end_date: open_trade_count_start = LocalTrade.bt_open_open_trade_count @@ -1181,9 +1158,35 @@ class Backtesting: row_index += 1 indexes[pair] = row_index self.dataprovider._set_dataframe_max_index(row_index) + current_detail_time: datetime = row[DATE_IDX].to_pydatetime() + if self.timeframe_detail and pair in self.detail_data: + exit_candle_end = current_detail_time + timedelta(minutes=self.timeframe_min) - open_trade_count_start = self.backtest_loop( - row, pair, current_time, end_date, max_open_trades, open_trade_count_start) + detail_data = self.detail_data[pair] + detail_data = detail_data.loc[ + (detail_data['date'] >= current_detail_time) & + (detail_data['date'] < exit_candle_end) + ].copy() + if len(detail_data) == 0: + # Fall back to "regular" data if no detail data was found for this candle + open_trade_count_start = self.backtest_loop( + row, pair, current_time, end_date, max_open_trades, + open_trade_count_start) + detail_data.loc[:, 'enter_long'] = row[LONG_IDX] + detail_data.loc[:, 'exit_long'] = row[ELONG_IDX] + detail_data.loc[:, 'enter_short'] = row[SHORT_IDX] + detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX] + detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX] + detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX] + is_first = True + for det_row in detail_data[HEADERS].values.tolist(): + open_trade_count_start = self.backtest_loop( + det_row, pair, current_time, end_date, max_open_trades, + open_trade_count_start, is_first) + is_first = False + else: + open_trade_count_start = self.backtest_loop( + row, pair, current_time, end_date, max_open_trades, open_trade_count_start) # Move time one configured time_interval ahead. self.progress.increment() From 29ba263c3c19a96abecf50015dcc9f6017fa6ee5 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 22 Oct 2022 16:23:03 +0200 Subject: [PATCH 113/232] Update some test parameters --- tests/optimize/test_backtesting.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index 140cc3394..21d9d25cc 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -686,7 +686,7 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None: ) # No data available. - res = backtesting._get_exit_trade_entry(trade, row_sell) + res = backtesting._get_exit_trade_entry(trade, row_sell, True) assert res is not None assert res.exit_reason == ExitType.ROI.value assert res.close_date_utc == datetime(2020, 1, 1, 5, 0, tzinfo=timezone.utc) @@ -699,13 +699,13 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None: [], columns=['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long', 'enter_short', 'exit_short', 'long_tag', 'short_tag', 'exit_tag']) - res = backtesting._get_exit_trade_entry(trade, row) + res = backtesting._get_exit_trade_entry(trade, row, True) assert res is None # Assign backtest-detail data backtesting.detail_data[pair] = row_detail - res = backtesting._get_exit_trade_entry(trade, row_sell) + res = backtesting._get_exit_trade_entry(trade, row_sell, True) assert res is not None assert res.exit_reason == ExitType.ROI.value # Sell at minute 3 (not available above!) 
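As an aside, the detail-candle mechanics introduced in the patches above can be pictured with a few lines of standalone pandas (made-up data, not freqtrade code): each main candle is sliced into its detail rows, entries are only considered on the first row (`is_first`), and the simulated clock advances by the detail timeframe.

```python
from datetime import timedelta

import pandas as pd

# Hypothetical data: one 1h main candle starting at 03:00 plus surrounding 5m detail candles
main_candle_start = pd.Timestamp("2022-10-01 03:00:00", tz="UTC")
timeframe_min, timeframe_detail_min = 60, 5

detail = pd.DataFrame({
    "date": pd.date_range(main_candle_start, periods=24, freq="5min"),
    "close": [1.0 + i / 1000 for i in range(24)],
})

# Same slice the backtest loop performs: only the detail rows inside the main candle
candle_end = main_candle_start + timedelta(minutes=timeframe_min)
window = detail.loc[(detail["date"] >= main_candle_start) & (detail["date"] < candle_end)]

# Entries (and funding fees) are only evaluated once per main candle (is_first=True),
# while exits/fills are checked on every detail row as the clock moves in 5m steps.
current_time_det = main_candle_start
for i, (_, det_row) in enumerate(window.iterrows()):
    is_first = i == 0
    print(current_time_det, "is_first:", is_first, "close:", det_row["close"])
    current_time_det += timedelta(minutes=timeframe_detail_min)
```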
From 0888b53b5a080c069006dc30d479126b17e56979 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 4 Nov 2022 07:07:56 +0100 Subject: [PATCH 114/232] Udpate current_time handling for detail loop --- freqtrade/optimize/backtesting.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index b3395a2c3..fa45e9dd4 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -1179,9 +1179,11 @@ class Backtesting: detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX] detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX] is_first = True + current_time_det = current_time for det_row in detail_data[HEADERS].values.tolist(): + current_time_det += timedelta(minutes=self.timeframe_detail_min) open_trade_count_start = self.backtest_loop( - det_row, pair, current_time, end_date, max_open_trades, + det_row, pair, current_time_det, end_date, max_open_trades, open_trade_count_start, is_first) is_first = False else: From 5bd3e54b17424daec79c3208589b807f92890b7a Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 5 Nov 2022 20:01:05 +0100 Subject: [PATCH 115/232] Add test for detail backtesting --- tests/optimize/test_backtesting.py | 87 ++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index 21d9d25cc..26c31efef 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -787,17 +787,98 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None: for _, t in results.iterrows(): assert len(t['orders']) == 2 ln = data_pair.loc[data_pair["date"] == t["open_date"]] - # Check open trade rate alignes to open rate + # Check open trade rate aligns to open rate assert not ln.empty assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6) - # check close trade rate alignes to close rate or is between high and low + # check close trade rate aligns to close rate or is between high and low ln1 = data_pair.loc[data_pair["date"] == t["close_date"]] - assert not ln1.empty assert (round(ln1.iloc[0]["open"], 6) == round(t["close_rate"], 6) or round(ln1.iloc[0]["low"], 6) < round( t["close_rate"], 6) < round(ln1.iloc[0]["high"], 6)) +@pytest.mark.parametrize('use_detail', [True, False]) +def test_backtest_one_detail(default_conf_usdt, fee, mocker, testdatadir, use_detail) -> None: + default_conf_usdt['use_exit_signal'] = False + mocker.patch('freqtrade.exchange.Exchange.get_fee', fee) + mocker.patch("freqtrade.exchange.Exchange.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch("freqtrade.exchange.Exchange.get_max_pair_stake_amount", return_value=float('inf')) + if use_detail: + default_conf_usdt['timeframe_detail'] = '1m' + patch_exchange(mocker) + + def advise_entry(df, *args, **kwargs): + # Mock function to force several entries + df.loc[(df['rsi'] < 40), 'enter_long'] = 1 + return df + + def custom_entry_price(proposed_rate, **kwargs): + return proposed_rate * 0.997 + + backtesting = Backtesting(default_conf_usdt) + backtesting._set_strategy(backtesting.strategylist[0]) + backtesting.strategy.populate_entry_trend = advise_entry + backtesting.strategy.custom_entry_price = custom_entry_price + pair = 'XRP/ETH' + # Pick a timerange adapted to the pair we use to test + timerange = TimeRange.parse_timerange('20191010-20191013') + data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['XRP/ETH'], + timerange=timerange) + if 
use_detail: + data_1m = history.load_data(datadir=testdatadir, timeframe='1m', pairs=['XRP/ETH'], + timerange=timerange) + backtesting.detail_data = data_1m + processed = backtesting.strategy.advise_all_indicators(data) + min_date, max_date = get_timerange(processed) + + result = backtesting.backtest( + processed=deepcopy(processed), + start_date=min_date, + end_date=max_date, + max_open_trades=10, + ) + results = result['results'] + assert not results.empty + # Timeout settings from default_conf = entry: 10, exit: 30 + assert len(results) == (2 if use_detail else 3) + + assert 'orders' in results.columns + data_pair = processed[pair] + + data_1m_pair = data_1m[pair] if use_detail else pd.DataFrame() + late_entry = 0 + for _, t in results.iterrows(): + assert len(t['orders']) == 2 + + entryo = t['orders'][0] + entry_ts = datetime.fromtimestamp(entryo['order_filled_timestamp'] // 1000, tz=timezone.utc) + if entry_ts > t['open_date']: + late_entry += 1 + + # Get "entry fill" candle + ln = (data_1m_pair.loc[data_1m_pair["date"] == entry_ts] + if use_detail else data_pair.loc[data_pair["date"] == entry_ts]) + # Check open trade rate aligns to open rate + assert not ln.empty + + # assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6) + assert round(ln.iloc[0]["low"], 6) <= round( + t["open_rate"], 6) <= round(ln.iloc[0]["high"], 6) + # check close trade rate aligns to close rate or is between high and low + ln1 = data_pair.loc[data_pair["date"] == t["close_date"]] + if use_detail: + ln1_1m = data_1m_pair.loc[data_1m_pair["date"] == t["close_date"]] + assert not ln1.empty or not ln1_1m.empty + else: + assert not ln1.empty + ln2 = ln1_1m if ln1.empty else ln1 + + assert (round(ln2.iloc[0]["low"], 6) <= round( + t["close_rate"], 6) <= round(ln2.iloc[0]["high"], 6)) + + assert late_entry > 0 + + def test_backtest_timedout_entry_orders(default_conf, fee, mocker, testdatadir) -> None: # This strategy intentionally places unfillable orders. 
default_conf['strategy'] = 'StrategyTestV3CustomEntryPrice' From d089fdae34820726c4902911bf27fa96ea44e27b Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 5 Nov 2022 20:02:36 +0100 Subject: [PATCH 116/232] Fix current-time_det calculation --- freqtrade/optimize/backtesting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index fa45e9dd4..8faeeb9fe 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -1181,10 +1181,10 @@ class Backtesting: is_first = True current_time_det = current_time for det_row in detail_data[HEADERS].values.tolist(): - current_time_det += timedelta(minutes=self.timeframe_detail_min) open_trade_count_start = self.backtest_loop( det_row, pair, current_time_det, end_date, max_open_trades, open_trade_count_start, is_first) + current_time_det += timedelta(minutes=self.timeframe_detail_min) is_first = False else: open_trade_count_start = self.backtest_loop( From ded57fb3019e1e564cc9a6842c2183ae18de8951 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 5 Nov 2022 20:03:20 +0100 Subject: [PATCH 117/232] Remove no longer valid test part --- freqtrade/optimize/backtesting.py | 2 +- tests/optimize/test_backtesting.py | 32 ------------------------------ 2 files changed, 1 insertion(+), 33 deletions(-) diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 8faeeb9fe..54312177c 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -1064,7 +1064,7 @@ class Backtesting: # without positionstacking, we can only have one open trade per pair. # max_open_trades must be respected # don't open on the last row - # We only open trades on the initial candle. + # We only open trades on the main candle, not on detail candles trade_dir = self.check_for_trade_entry(row) if ( (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0) diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index 26c31efef..9a91b0c6f 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -663,27 +663,6 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None: '', # Exit Signal Name ] - row_detail = pd.DataFrame( - [ - [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=0, tzinfo=timezone.utc), - 200, 200.1, 197, 199, 1, 0, 0, 0, '', '', '', - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=1, tzinfo=timezone.utc), - 199, 199.7, 199, 199.5, 0, 0, 0, 0, '', '', '', - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=2, tzinfo=timezone.utc), - 199.5, 200.8, 199, 200.9, 0, 0, 0, 0, '', '', '', - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=3, tzinfo=timezone.utc), - 200.5, 210.5, 193, 210.5, 0, 0, 0, 0, '', '', '', # ROI sell (?) - ], [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=4, tzinfo=timezone.utc), - 200, 200.1, 193, 199, 0, 0, 0, 0, '', '', '', - ], - ], columns=['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long', - 'enter_short', 'exit_short', 'long_tag', 'short_tag', 'exit_tag'] - ) # No data available. 
    res = backtesting._get_exit_trade_entry(trade, row_sell, True)
 
@@ -702,17 +681,6 @@ def test_backtest__get_sell_trade_entry(default_conf, fee, mocker) -> None:
     res = backtesting._get_exit_trade_entry(trade, row, True)
     assert res is None
 
-    # Assign backtest-detail data
-    backtesting.detail_data[pair] = row_detail
-
-    res = backtesting._get_exit_trade_entry(trade, row_sell, True)
-    assert res is not None
-    assert res.exit_reason == ExitType.ROI.value
-    # Sell at minute 3 (not available above!)
-    assert res.close_date_utc == datetime(2020, 1, 1, 5, 3, tzinfo=timezone.utc)
-    sell_order = res.select_order('sell', True)
-    assert sell_order is not None
-
 
 def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None:
     default_conf['use_exit_signal'] = False

From 2c1330a4e29abfbea95df7e7cfa994faffe3dd81 Mon Sep 17 00:00:00 2001
From: Matthias 
Date: Sun, 6 Nov 2022 08:32:27 +0100
Subject: [PATCH 118/232] Update docs to new behavior

---
 docs/backtesting.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/backtesting.md b/docs/backtesting.md
index e3cddb7a1..bfe0f4d07 100644
--- a/docs/backtesting.md
+++ b/docs/backtesting.md
@@ -583,7 +583,8 @@ To utilize this, you can append `--timeframe-detail 5m` to your regular backtest
 freqtrade backtesting --strategy AwesomeStrategy --timeframe 1h --timeframe-detail 5m
 ```
 
-This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe - and for every "open trade candle" (candles where a trade is open) the 5m data will be used to simulate intra-candle movements.
+This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe, and entry orders will only be placed on the main timeframe; however, order fills and exit signals will be evaluated on the 5m candles, simulating intra-candle movements.
+
 All callback functions (`custom_exit()`, `custom_stoploss()`, ... ) will be running for each 5m candle once the trade is opened (so 12 times in the above example of 1h timeframe, and 5m detailed timeframe).
 
 `--timeframe-detail` must be smaller than the original timeframe, otherwise backtesting will fail to start. 
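A short illustration of the consequence for strategy callbacks: with `--timeframe-detail 5m`, an exit written in `custom_exit()` reacts at 5m resolution even though the strategy runs on 1h candles. The sketch below uses the standard callback signature; the strategy name, the empty populate methods, and the 2% threshold are made up for illustration.

```python
from datetime import datetime

from pandas import DataFrame

from freqtrade.persistence import Trade
from freqtrade.strategy import IStrategy


class AwesomeStrategy(IStrategy):
    timeframe = '1h'
    stoploss = -0.10
    minimal_roi = {"0": 1}

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe  # indicator logic omitted in this sketch

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe  # entry logic omitted in this sketch

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def custom_exit(self, pair: str, trade: Trade, current_time: datetime,
                    current_rate: float, current_profit: float, **kwargs):
        # With --timeframe-detail 5m this runs on every 5m candle of an open trade,
        # so the target below can trigger mid-way through a 1h candle.
        if current_profit > 0.02:
            return 'intra_candle_profit_target'
        return None
```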
From 6ff0e66ddf7115a7ffc04df36d2aad299004aac8 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 11:13:31 +0100 Subject: [PATCH 119/232] ensure strat tests are updated --- tests/strategy/test_strategy_loading.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/strategy/test_strategy_loading.py b/tests/strategy/test_strategy_loading.py index 6b831c116..5fcc75026 100644 --- a/tests/strategy/test_strategy_loading.py +++ b/tests/strategy/test_strategy_loading.py @@ -34,7 +34,7 @@ def test_search_all_strategies_no_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver._search_all_objects(directory, enum_failed=False) assert isinstance(strategies, list) - assert len(strategies) == 11 + assert len(strategies) == 12 assert isinstance(strategies[0], dict) @@ -42,10 +42,10 @@ def test_search_all_strategies_with_failed(): directory = Path(__file__).parent / "strats" strategies = StrategyResolver._search_all_objects(directory, enum_failed=True) assert isinstance(strategies, list) - assert len(strategies) == 12 + assert len(strategies) == 13 # with enum_failed=True search_all_objects() shall find 2 good strategies # and 1 which fails to load - assert len([x for x in strategies if x['class'] is not None]) == 11 + assert len([x for x in strategies if x['class'] is not None]) == 12 assert len([x for x in strategies if x['class'] is None]) == 1 From 6746868ea73b8f252590ce95b888a99398da470c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 11:33:03 +0100 Subject: [PATCH 120/232] store dataprovider to self instead of strategy --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++-- freqtrade/freqai/freqai_interface.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 64af31c45..f3da91b51 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -177,10 +177,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): trade_duration = 0 for trade in open_trades: if trade.pair == pair: - if self.strategy.dp._exchange is None: # type: ignore + if self.data_provider._exchange is None: # type: ignore logger.error('No exchange available.') else: - current_value = self.strategy.dp._exchange.get_rate( # type: ignore + current_value = self.data_provider._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) openrate = trade.open_rate now = datetime.now(timezone.utc).timestamp() diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index d28f00dda..406d37dc3 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -15,6 +15,7 @@ from pandas import DataFrame from freqtrade.configuration import TimeRange from freqtrade.constants import DATETIME_PRINT_FORMAT, Config +from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.exchange import timeframe_to_seconds @@ -99,7 +100,7 @@ class IFreqaiModel(ABC): self.get_corr_dataframes: bool = True self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() - self.strategy: Optional[IStrategy] = None + self.data_provider: Optional[DataProvider] = None self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) record_params(config, self.full_path) 
@@ -129,7 +130,7 @@ class IFreqaiModel(ABC): self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) self.dd.set_pair_dict_info(metadata) - self.strategy = strategy + self.data_provider = strategy.dp if self.live: self.inference_timer('start') From 9c6b97c67811589882123ee3d52d040590018545 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 12:01:59 +0100 Subject: [PATCH 121/232] ensure normalization acceleration methods are employed in RL --- docs/freqai-reinforcement-learning.md | 24 +++++++++---------- .../RL/BaseReinforcementLearningModel.py | 18 ++++++++++---- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 8b775e046..7179da626 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -38,8 +38,6 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from self, pair, df, tf, informative=None, set_generalized_indicators=False ): - coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) @@ -47,15 +45,15 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) # The following features are necessary for RL models - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] + informative[f"%-{pair}raw_close"] = informative["close"] + informative[f"%-{pair}raw_open"] = informative["open"] + informative[f"%-{pair}raw_high"] = informative["high"] + informative[f"%-{pair}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data @@ -88,10 +86,10 @@ Most of the function remains the same as for typical Regressors, however, the fu ```python # The following features are necessary for RL models - informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] + informative[f"%-{pair}raw_close"] = informative["close"] + informative[f"%-{pair}raw_open"] = informative["open"] + informative[f"%-{pair}raw_high"] = informative["high"] + informative[f"%-{pair}raw_low"] = informative["low"] ``` Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the user set the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. 
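To round out the picture, this is roughly how the `&-action` column is consumed on the strategy side. The action-to-integer mapping shown (0 neutral, 1/2 long enter/exit, 3/4 short enter/exit) is an assumption based on the 5-action environment used in these patches; verify it against the `Actions` enum of the environment you actually run. `do_predict` is FreqAI's standard outlier flag, and the FreqAI wiring follows the usual `self.freqai.start()` call.

```python
from functools import reduce

from pandas import DataFrame

from freqtrade.strategy import IStrategy


class MyRLStrategy(IStrategy):
    """
    Hypothetical FreqAI strategy consuming the agent's `&-action` predictions.
    `populate_any_indicators()` (including the raw price features shown above)
    is assumed to be defined as in the example strategy.
    """
    timeframe = '5m'
    stoploss = -0.05
    minimal_roi = {"0": 0.1}
    can_short = True
    process_only_new_candles = True
    startup_candle_count: int = 30

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Standard FreqAI entry point: fills the dataframe with predictions,
        # including the `&-action` and `do_predict` columns.
        return self.freqai.start(dataframe, metadata, self)

    def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        # Only act when FreqAI trusts the prediction (do_predict == 1)
        enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1]
        df.loc[
            reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
        ] = (1, "long")

        enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3]
        df.loc[
            reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
        ] = (1, "short")

        return df

    def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2]
        df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1

        exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4]
        df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1

        return df
```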
diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index f3da91b51..323cfd782 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -253,18 +253,26 @@ class BaseReinforcementLearningModel(IFreqaiModel): Builds the train prices and test prices for the environment. """ - coin = pair.split('/')[0] + pair = pair.replace(':', '') train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] # price data for model training and evaluation tf = self.config['timeframe'] - ohlc_list = [f'%-{coin}raw_open_{tf}', f'%-{coin}raw_low_{tf}', - f'%-{coin}raw_high_{tf}', f'%-{coin}raw_close_{tf}'] - rename_dict = {f'%-{coin}raw_open_{tf}': 'open', f'%-{coin}raw_low_{tf}': 'low', - f'%-{coin}raw_high_{tf}': ' high', f'%-{coin}raw_close_{tf}': 'close'} + ohlc_list = [f'%-{pair}raw_open_{tf}', f'%-{pair}raw_low_{tf}', + f'%-{pair}raw_high_{tf}', f'%-{pair}raw_close_{tf}'] + rename_dict = {f'%-{pair}raw_open_{tf}': 'open', f'%-{pair}raw_low_{tf}': 'low', + f'%-{pair}raw_high_{tf}': ' high', f'%-{pair}raw_close_{tf}': 'close'} prices_train = train_df.filter(ohlc_list, axis=1) + if prices_train.empty: + raise OperationalException('Reinforcement learning module didnt find the raw prices ' + 'assigned in populate_any_indicators. Please assign them ' + 'with:\n' + 'informative[f"%-{pair}raw_close"] = informative["close"]\n' + 'informative[f"%-{pair}raw_open"] = informative["open"]\n' + 'informative[f"%-{pair}raw_high"] = informative["high"]\n' + 'informative[f"%-{pair}raw_low"] = informative["low"]\n') prices_train.rename(columns=rename_dict, inplace=True) prices_train.reset_index(drop=True) From e71a8b8ac11faab3fdfc504ea1b26f76a9f8c203 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 18:46:48 +0100 Subject: [PATCH 122/232] add ability to integrate state info or not, and prevent state info integration during backtesting --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 15 ---------- freqtrade/freqai/RL/BaseEnvironment.py | 28 +++++++++++++------ .../RL/BaseReinforcementLearningModel.py | 20 ++++++++++--- 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 80543bf72..663ecc77e 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -2,9 +2,7 @@ import logging from enum import Enum import numpy as np -import pandas as pd from gym import spaces -from pandas import DataFrame from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions @@ -145,19 +143,6 @@ class Base5ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info - def _get_observation(self): - features_window = self.signal_features[( - self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 3)), - columns=['current_profit_pct', 'position', 'trade_duration'], - index=features_window.index) - - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state['trade_duration'] = self.get_trade_duration() - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state - def get_trade_duration(self): if self._last_trade_tick is None: return 0 diff --git a/freqtrade/freqai/RL/BaseEnvironment.py 
b/freqtrade/freqai/RL/BaseEnvironment.py index 6474483c6..6633bf3e8 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -35,6 +35,7 @@ class BaseEnvironment(gym.Env): id: str = 'baseenv-1', seed: int = 1, config: dict = {}): self.rl_config = config['freqai']['rl_config'] + self.add_state_info = self.rl_config.get('add_state_info', False) self.id = id self.seed(seed) self.reset_env(df, prices, window_size, reward_kwargs, starting_point) @@ -58,7 +59,11 @@ class BaseEnvironment(gym.Env): self.fee = 0.0015 # # spaces - self.shape = (window_size, self.signal_features.shape[1] + 3) + if self.add_state_info: + self.total_features = self.signal_features.shape[1] + 3 + else: + self.total_features = self.signal_features.shape[1] + self.shape = (window_size, self.total_features) self.set_action_space() self.observation_space = spaces.Box( low=-1, high=1, shape=self.shape, dtype=np.float32) @@ -126,15 +131,20 @@ class BaseEnvironment(gym.Env): """ features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] - features_and_state = DataFrame(np.zeros((len(features_window), 3)), - columns=['current_profit_pct', 'position', 'trade_duration'], - index=features_window.index) + if self.add_state_info: + features_and_state = DataFrame(np.zeros((len(features_window), 3)), + columns=['current_profit_pct', + 'position', + 'trade_duration'], + index=features_window.index) - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state['trade_duration'] = self.get_trade_duration() - features_and_state = pd.concat([features_window, features_and_state], axis=1) - return features_and_state + features_and_state['current_profit_pct'] = self.get_unrealized_profit() + features_and_state['position'] = self._position.value + features_and_state['trade_duration'] = self.get_trade_duration() + features_and_state = pd.concat([features_window, features_and_state], axis=1) + return features_and_state + else: + return features_window def get_trade_duration(self): """ diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 323cfd782..885918ffb 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -234,11 +234,12 @@ class BaseReinforcementLearningModel(IFreqaiModel): output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) def _predict(window): - market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations = dataframe.iloc[window.index] - observations['current_profit_pct'] = current_profit - observations['position'] = market_side - observations['trade_duration'] = trade_duration + if self.live: # self.guard_state_info_if_backtest(): + market_side, current_profit, trade_duration = self.get_state_info(dk.pair) + observations['current_profit_pct'] = current_profit + observations['position'] = market_side + observations['trade_duration'] = trade_duration res, _ = model.predict(observations, deterministic=True) return res @@ -246,6 +247,17 @@ class BaseReinforcementLearningModel(IFreqaiModel): return output + # def guard_state_info_if_backtest(self): + # """ + # Ensure that backtesting mode doesnt try to use state information. 
+ # """ + # if self.rl_config('add_state_info', False) and not self.live: + # logger.warning('Backtesting with state info is currently unavailable ' + # 'turning it off.') + # self.rl_config['add_state_info'] = False + + # return not self.rl_config['add_state_info'] + def build_ohlc_price_dataframes(self, data_dictionary: dict, pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame, DataFrame]: From 259f87bd40bc8a82e214174b2e57326aa900aca9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 12 Nov 2022 19:01:40 +0100 Subject: [PATCH 123/232] fix rl test; --- tests/strategy/strats/freqai_rl_test_strat.py | 29 +++++-------------- 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/tests/strategy/strats/freqai_rl_test_strat.py b/tests/strategy/strats/freqai_rl_test_strat.py index 7b36dc6be..8d507a6da 100644 --- a/tests/strategy/strats/freqai_rl_test_strat.py +++ b/tests/strategy/strats/freqai_rl_test_strat.py @@ -38,25 +38,10 @@ class freqai_rl_test_strat(IStrategy): startup_candle_count: int = 30 can_short = False - def informative_pairs(self): - whitelist_pairs = self.dp.current_whitelist() - corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] - informative_pairs = [] - for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: - for pair in whitelist_pairs: - informative_pairs.append((pair, tf)) - for pair in corr_pairs: - if pair in whitelist_pairs: - continue # avoid duplication - informative_pairs.append((pair, tf)) - return informative_pairs - def populate_any_indicators( self, pair, df, tf, informative=None, set_generalized_indicators=False ): - coin = pair.split('/')[0] - if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) @@ -64,16 +49,16 @@ class freqai_rl_test_strat(IStrategy): for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]: t = int(t) - informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) + informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) + informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) # FIXME: add these outside the user strategy? # The following columns are necessary for RL models. 
- informative[f"%-{coin}raw_close"] = informative["close"] - informative[f"%-{coin}raw_open"] = informative["open"] - informative[f"%-{coin}raw_high"] = informative["high"] - informative[f"%-{coin}raw_low"] = informative["low"] + informative[f"%-{pair}raw_close"] = informative["close"] + informative[f"%-{pair}raw_open"] = informative["open"] + informative[f"%-{pair}raw_high"] = informative["high"] + informative[f"%-{pair}raw_low"] = informative["low"] indicators = [col for col in informative if col.startswith("%")] # This loop duplicates and shifts all indicators to add a sense of recency to data From 81f800a79bf07b0778744faa9574198faf92ec79 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 13:41:17 +0100 Subject: [PATCH 124/232] switch to using FT calc_profi_pct, reverse entry/exit fees --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 1 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 -- freqtrade/freqai/RL/BaseEnvironment.py | 15 +++++++-------- .../freqai/RL/BaseReinforcementLearningModel.py | 13 ++++--------- 4 files changed, 12 insertions(+), 19 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index b4fe78b71..0c719ea92 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -74,6 +74,7 @@ class Base4ActionRLEnv(BaseEnvironment): self._last_trade_tick = self._current_tick elif action == Actions.Exit.value: self._position = Positions.Neutral + self._update_total_profit() trade_type = "neutral" self._last_trade_tick = None else: diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 663ecc77e..b6ebcf703 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -75,8 +75,6 @@ class Base5ActionRLEnv(BaseEnvironment): if self._current_tick == self._end_tick: self._done = True - self.update_portfolio_log_returns(action) - self._update_unrealized_total_profit() step_reward = self.calculate_reward(action) self.total_reward += step_reward diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 6633bf3e8..3b56fc2c4 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -165,12 +165,12 @@ class BaseEnvironment(gym.Env): if self._position == Positions.Neutral: return 0. elif self._position == Positions.Short: - current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) return (current_price - last_trade_price) / last_trade_price else: return 0. @@ -210,9 +210,8 @@ class BaseEnvironment(gym.Env): """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. - :params: - action: int = The action made by the agent for the current candle. - :returns: + :param action: int = The action made by the agent for the current candle. 
+ :return: float = the reward to give to the agent for current step (used for optimization of weights in NN) """ @@ -234,7 +233,7 @@ class BaseEnvironment(gym.Env): def _update_total_profit(self): pnl = self.get_unrealized_profit() if self.compound_trades: - # assumes unite stake and compounding + # assumes unitestake and compounding self._total_profit = self._total_profit * (1 + pnl) else: # assumes unit stake and no compounding diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 885918ffb..85756ad8f 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -180,17 +180,12 @@ class BaseReinforcementLearningModel(IFreqaiModel): if self.data_provider._exchange is None: # type: ignore logger.error('No exchange available.') else: - current_value = self.data_provider._exchange.get_rate( # type: ignore + current_rate = self.data_provider._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) - openrate = trade.open_rate + now = datetime.now(timezone.utc).timestamp() - trade_duration = int((now - trade.open_date.timestamp()) / self.base_tf_seconds) - if 'long' in str(trade.enter_tag): - market_side = 1 - current_profit = (current_value - openrate) / openrate - else: - market_side = 0 - current_profit = (openrate - current_value) / openrate + trade_duration = int((now - trade.open_date_utc) / self.base_tf_seconds) + current_profit = trade.calc_profit_ratio(current_rate) return market_side, current_profit, int(trade_duration) From af9e4005626c519015f5edd37c6101e0b22305f7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 15:31:37 +0100 Subject: [PATCH 125/232] add test coverage, fix bug in base environment. Ensure proper fee is used. --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 1 - freqtrade/freqai/RL/BaseEnvironment.py | 34 +++++++++++-- .../RL/BaseReinforcementLearningModel.py | 37 +++++++------- tests/freqai/test_freqai_interface.py | 49 +++++++++++++++++-- 4 files changed, 92 insertions(+), 29 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index b6ebcf703..0d101ee9c 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -148,7 +148,6 @@ class Base5ActionRLEnv(BaseEnvironment): return self._current_tick - self._last_trade_tick def is_tradesignal(self, action: int): - # trade signal """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 3b56fc2c4..bb8cd992c 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -10,6 +10,8 @@ from gym import spaces from gym.utils import seeding from pandas import DataFrame +from freqtrade.data.dataprovider import DataProvider + logger = logging.getLogger(__name__) @@ -32,8 +34,21 @@ class BaseEnvironment(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}): - + id: str = 'baseenv-1', seed: int = 1, config: dict = {}, + dp: Optional[DataProvider] = None): + """ + Initializes the training/eval environment. 
+ :param df: dataframe of features + :param prices: dataframe of prices to be used in the training environment + :param window_size: size of window (temporal) to pass to the agent + :param reward_kwargs: extra config settings assigned by user in `rl_config` + :param starting_point: start at edge of window or not + :param id: string id of the environment (used in backend for multiprocessed env) + :param seed: Sets the seed of the environment higher in the gym.Env object + :param config: Typical user configuration file + :param dp: dataprovider from freqtrade + """ + self.config = config self.rl_config = config['freqai']['rl_config'] self.add_state_info = self.rl_config.get('add_state_info', False) self.id = id @@ -41,12 +56,23 @@ class BaseEnvironment(gym.Env): self.reset_env(df, prices, window_size, reward_kwargs, starting_point) self.max_drawdown = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8) self.compound_trades = config['stake_amount'] == 'unlimited' + if self.config.get('fee', None) is not None: + self.fee = self.config['fee'] + elif dp is not None: + self.fee = self.dp.exchange.get_fee(symbol=dp.current_whitelist()[0]) + else: + self.fee = 0.0015 def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): """ Resets the environment when the agent fails (in our case, if the drawdown exceeds the user set max_training_drawdown_pct) + :param df: dataframe of features + :param prices: dataframe of prices to be used in the training environment + :param window_size: size of window (temporal) to pass to the agent + :param reward_kwargs: extra config settings assigned by user in `rl_config` + :param starting_point: start at edge of window or not """ self.df = df self.signal_features = self.df @@ -56,8 +82,6 @@ class BaseEnvironment(gym.Env): self.rr = reward_kwargs["rr"] self.profit_aim = reward_kwargs["profit_aim"] - self.fee = 0.0015 - # # spaces if self.add_state_info: self.total_features = self.signal_features.shape[1] + 3 @@ -233,7 +257,7 @@ class BaseEnvironment(gym.Env): def _update_total_profit(self): pnl = self.get_unrealized_profit() if self.compound_trades: - # assumes unitestake and compounding + # assumes unit stake and compounding self._total_profit = self._total_profit * (1 + pnl) else: # assumes unit stake and no compounding diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 85756ad8f..a8c79ce6e 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -74,10 +74,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.ft_params.update({'use_SVM_to_remove_outliers': False}) logger.warning('User tried to use SVM with RL. Deactivating SVM.') if self.ft_params.get('use_DBSCAN_to_remove_outliers', False): - self.ft_params.update({'use_SVM_to_remove_outliers': False}) + self.ft_params.update({'use_DBSCAN_to_remove_outliers': False}) logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.') if self.freqai_info['data_split_parameters'].get('shuffle', False): - self.freqai_info['data_split_parameters'].update('shuffle', False) + self.freqai_info['data_split_parameters'].update({'shuffle': False}) logger.warning('User tried to shuffle training data. 
Setting shuffle to False') def train( @@ -141,11 +141,18 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config) - self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, config=self.config)) + self.train_env = self.MyRLEnv(df=train_df, + prices=prices_train, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, + config=self.config, + dp=self.data_provider) + self.eval_env = Monitor(self.MyRLEnv(df=test_df, + prices=prices_test, + window_size=self.CONV_WIDTH, + reward_kwargs=self.reward_params, + config=self.config, + dp=self.data_provider)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -179,12 +186,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): if trade.pair == pair: if self.data_provider._exchange is None: # type: ignore logger.error('No exchange available.') + return 0, 0, 0 else: current_rate = self.data_provider._exchange.get_rate( # type: ignore pair, refresh=False, side="exit", is_short=trade.is_short) now = datetime.now(timezone.utc).timestamp() - trade_duration = int((now - trade.open_date_utc) / self.base_tf_seconds) + trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds) current_profit = trade.calc_profit_ratio(current_rate) return market_side, current_profit, int(trade_duration) @@ -230,7 +238,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def _predict(window): observations = dataframe.iloc[window.index] - if self.live: # self.guard_state_info_if_backtest(): + if self.live and self.rl_config('add_state_info', False): market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations['current_profit_pct'] = current_profit observations['position'] = market_side @@ -242,17 +250,6 @@ class BaseReinforcementLearningModel(IFreqaiModel): return output - # def guard_state_info_if_backtest(self): - # """ - # Ensure that backtesting mode doesnt try to use state information. 
- # """ - # if self.rl_config('add_state_info', False) and not self.live: - # logger.warning('Backtesting with state info is currently unavailable ' - # 'turning it off.') - # self.rl_config['add_state_info'] = False - - # return not self.rl_config['add_state_info'] - def build_ohlc_price_dataframes(self, data_dictionary: dict, pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame, DataFrame]: diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 6ed9dac3d..08f33add9 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -13,7 +13,7 @@ from freqtrade.freqai.utils import download_all_data_for_training, get_required_ from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager -from tests.conftest import get_patched_exchange, log_has_re +from tests.conftest import create_mock_trades, get_patched_exchange, log_has_re from tests.freqai.conftest import get_patched_freqai_strategy, make_rl_config @@ -32,7 +32,7 @@ def is_mac() -> bool: ('XGBoostRegressor', False, True), ('XGBoostRFRegressor', False, False), ('CatboostRegressor', False, False), - ('ReinforcementLearner', False, False), + ('ReinforcementLearner', False, True), ('ReinforcementLearner_multiproc', False, False), ('ReinforcementLearner_test_4ac', False, False) ]) @@ -40,7 +40,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") - if is_mac(): + if is_mac() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") model_save_ext = 'joblib' @@ -53,6 +53,9 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, if 'ReinforcementLearner' in model: model_save_ext = 'zip' freqai_conf = make_rl_config(freqai_conf) + # test the RL guardrails + freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) + freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") @@ -497,3 +500,43 @@ def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir): "Downloading", caplog, ) + + +@pytest.mark.usefixtures("init_persistence") +@pytest.mark.parametrize('dp_exists', [(False), (True)]) +def test_get_state_info(mocker, freqai_conf, dp_exists, caplog, tickers): + + if is_mac(): + pytest.skip("Reinforcement learning module not available on intel based Mac OS") + + freqai_conf.update({"freqaimodel": "ReinforcementLearner"}) + freqai_conf.update({"timerange": "20180110-20180130"}) + freqai_conf.update({"strategy": "freqai_rl_test_strat"}) + freqai_conf = make_rl_config(freqai_conf) + freqai_conf['entry_pricing']['price_side'] = 'same' + freqai_conf['exit_pricing']['price_side'] = 'same' + + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + ticker_mock = MagicMock(return_value=tickers()['ETH/BTC']) + mocker.patch("freqtrade.exchange.Exchange.fetch_ticker", ticker_mock) + strategy.dp = DataProvider(freqai_conf, exchange) + + if not dp_exists: + strategy.dp._exchange = None + + strategy.freqai_info = freqai_conf.get("freqai", {}) + freqai = strategy.freqai + freqai.data_provider = strategy.dp + freqai.live = True + + Trade.use_db = True + 
create_mock_trades(MagicMock(return_value=0.0025), False, True) + freqai.get_state_info("ADA/BTC") + freqai.get_state_info("ETH/BTC") + + if not dp_exists: + assert log_has_re( + "No exchange available", + caplog, + ) From 3c249ba9940d9ea4b842ae6f31106b729bf43f67 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 16:11:14 +0100 Subject: [PATCH 126/232] add doc for data_kitchen_thread_count` --- docs/freqai-parameter-table.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 7d00acde8..df3dd5b53 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -19,6 +19,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models.
**Datatype:** Boolean.
Default: `False`. | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`. | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file.
**Datatype:** Boolean.
Default: `False` +| `data_kitchen_thread_count` |
Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If the user does not set it (default), FreqAI will use the maximum number of threads - 2 (leaving one physical core available for the Freqtrade bot and FreqUI).
**Datatype:** Positive integer. | | **Feature parameters** | `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md).
**Datatype:** Dictionary. | `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset.
**Datatype:** List of timeframes (strings). From 388ca2120030bfe375b91826fd11f55dc406b443 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 16:56:31 +0100 Subject: [PATCH 127/232] update docs, fix bug in environment --- docs/freqai-parameter-table.md | 1 + docs/freqai-reinforcement-learning.md | 17 ++++++++++++----- freqtrade/constants.py | 19 +++++++++++++++++++ freqtrade/freqai/RL/BaseEnvironment.py | 2 +- 4 files changed, 33 insertions(+), 6 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index df3dd5b53..925609270 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -58,6 +58,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training.
**Datatype:** float.
Default: 0.8 | `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not).
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. +| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inference. The current state variables include trade duration, current profit, and trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. | | **Extraneous parameters** | `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. | `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: `2`. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 7179da626..693918051 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -16,7 +16,10 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new ### The RL interface -With the current framework, we aim to expose the training environment to the user via the common "prediction model" file (i.e. CatboostClassifier, LightGBMRegressor, etc.). Users inherit our base environment in this file, which allows them to override as much or as little of the environment as they wish. +With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv`: + + + We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. @@ -49,7 +52,7 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) - # The following features are necessary for RL models + # The following raw price values are necessary for RL models informative[f"%-{pair}raw_close"] = informative["close"] informative[f"%-{pair}raw_open"] = informative["open"] informative[f"%-{pair}raw_high"] = informative["high"] @@ -131,11 +134,12 @@ It is important to consider that `&-action` depends on which environment they ch ## Configuring the Reinforcement Learner -In order to configure the `Reinforcement Learner` the following dictionary to their `freqai` config: +In order to configure the `Reinforcement Learner` the following dictionary must exist in the `freqai` config: ```json "rl_config": { "train_cycles": 25, + "add_state_info": true, "max_trade_duration_candles": 300, "max_training_drawdown_pct": 0.02, "cpu_count": 8, @@ -148,11 +152,14 @@ In order to configure the `Reinforcement Learner` the following dictionary to th } ``` -Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environemtn to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). +Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). + +!!! Note + Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. 
For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html). ## Creating the reward -As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: +As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. 
Below we show examples of how these rewards are all calculated: ```python class MyRLEnv(Base5ActionRLEnv): diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 428f16586..e947b49e0 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -578,6 +578,25 @@ CONF_SCHEMA = { "model_training_parameters": { "type": "object" }, + "rl_config": { + "type": "object", + "properties": { + "train_cycles": {"type": "integer"}, + "max_trade_duration_candles": {"type": "integer"}, + "add_state_info": {"type": "boolean", "default": False}, + "max_training_drawdown_pct": {"type": "number", "default": 0.02}, + "cpu_count": {"type": "integer", "default": 1}, + "model_type": {"type": "string", "default": "PPO"}, + "policy_type": {"type": "string", "default": "MlpPolicy"}, + "model_reward_parameters": { + "type": "object", + "properties": { + "rr": {"type": "number", "default": 1}, + "profit_aim": {"type": "number", "default": 0.025} + } + } + }, + }, }, "required": [ "enabled", diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index bb8cd992c..6853377cb 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -59,7 +59,7 @@ class BaseEnvironment(gym.Env): if self.config.get('fee', None) is not None: self.fee = self.config['fee'] elif dp is not None: - self.fee = self.dp.exchange.get_fee(symbol=dp.current_whitelist()[0]) + self.fee = dp._exchange.get_fee(symbol=dp.current_whitelist()[0]) # type: ignore else: self.fee = 0.0015 From f8f553ec14b3793a5ae09d3b907eebeb4b253e5c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 16:58:36 +0100 Subject: [PATCH 128/232] remove references to "the user" --- docs/freqai-reinforcement-learning.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 693918051..9cd4f7ca3 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the user set the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: @@ -159,7 +159,7 @@ Parameter details can be found [here](freqai-parameter-table.md), but in general ## Creating the reward -As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. 
Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agnet for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: +As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: ```python class MyRLEnv(Base5ActionRLEnv): @@ -214,6 +214,6 @@ cd freqtrade tensorboard --logdir user_data/models/unique-id ``` -where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). +where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). ![tensorboard](assets/tensorboard.jpg) From 90f168d1ff28f50ee8299e2439e7e28a2152bbc2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:06:06 +0100 Subject: [PATCH 129/232] remove more user references. cleanup dataprovider --- docs/freqai-parameter-table.md | 4 ++-- docs/freqai-reinforcement-learning.md | 7 ++----- freqtrade/freqai/freqai_interface.py | 1 + 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 925609270..4009a280d 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -52,12 +52,12 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary. | `train_cycles` | Training time steps will be set based on `train_cycles` * the number of training data points.
**Datatype:** Integer. | `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. -| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int. +| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the customizable `calculate_reward()` function.
**Datatype:** int. | `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. | `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training.
**Datatype:** float.
Default: 0.8 | `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not).
**Datatype:** int. -| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. +| `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inference. The current state variables include trade duration, current profit, and trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. | | **Extraneous parameters** | `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 9cd4f7ca3..77cc38cba 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -10,16 +10,13 @@ Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. -A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live. *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* +A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors. ### The RL interface -With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv`: - - - +With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward). We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. 
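To make the nesting described above concrete, here is a minimal, illustrative sketch of a user prediction model that inherits `ReinforcementLearner` and customizes its environment through a nested `MyRLEnv`. The class name `MyCoolRLModel`, the `win_reward_factor` constant, and the reward logic are assumptions for demonstration only; only the class layout reflects the interface described here, and the import paths follow the file layout shown in the diffs above.

```python
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
from freqtrade.freqai.RL.BaseEnvironment import Positions


class MyCoolRLModel(ReinforcementLearner):
    """
    Illustrative user model: training, prediction and model handling are
    inherited from ReinforcementLearner; only the environment is customized.
    """

    class MyRLEnv(Base5ActionRLEnv):
        def calculate_reward(self, action: int) -> float:
            pnl = self.get_unrealized_profit()
            win_reward_factor = 2.0  # assumed, user-chosen constant

            # Reward closing a long position in profit, penalize closing at a loss
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                return float(pnl * win_reward_factor)

            # Small penalty otherwise so the agent is discouraged from idling
            return -1.0
```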
diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 406d37dc3..2e455a347 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -168,6 +168,7 @@ class IFreqaiModel(ABC): """ self.model = None self.dk = None + self.data_provider = None def _on_stop(self): """ From b421521be34c1bbef5e9203eebfcd9ba20aaef28 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:12:17 +0100 Subject: [PATCH 130/232] help default ReinforcementLearner users by assigning the model_type automatically --- freqtrade/freqai/data_drawer.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index d6ad3047d..d41675f89 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -99,10 +99,9 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) - if 'rl_config' in self.freqai_info: - self.model_type = 'stable_baselines' - logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3' - ' to save models.') + if 'Reinforcement' in self.config['freqaimodel']: + logger.warning('User passed a ReinforcementLearner model, FreqAI will ' + 'now use stable_baselines3 to save models.') else: self.model_type = self.freqai_info.get('model_save_type', 'joblib') From 96fafb7f5690c6a07bdeef6959a8e92c2ddebef0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:14:47 +0100 Subject: [PATCH 131/232] remove limit_ram_use --- freqtrade/freqai/data_drawer.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index d41675f89..590439697 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -98,7 +98,6 @@ class FreqaiDataDrawer: self.empty_pair_dict: pair_info = { "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} - self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False) if 'Reinforcement' in self.config['freqaimodel']: logger.warning('User passed a ReinforcementLearner model, FreqAI will ' 'now use stable_baselines3 to save models.') @@ -514,10 +513,10 @@ class FreqaiDataDrawer: dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb") ) - if not self.limit_ram_use: - self.model_dictionary[coin] = model + self.model_dictionary[coin] = model self.pair_dict[coin]["model_filename"] = dk.model_filename self.pair_dict[coin]["data_path"] = str(dk.data_path) + if coin not in self.meta_data_dictionary: self.meta_data_dictionary[coin] = {} self.meta_data_dictionary[coin]["train_df"] = dk.data_dictionary["train_features"] @@ -565,7 +564,7 @@ class FreqaiDataDrawer: dk.label_list = dk.data["label_list"] # try to access model in memory instead of loading object from disk to save time - if dk.live and coin in self.model_dictionary and not self.limit_ram_use: + if dk.live and coin in self.model_dictionary: model = self.model_dictionary[coin] elif self.model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") @@ -587,7 +586,7 @@ class FreqaiDataDrawer: ) # load it into ram if it was loaded from disk - if coin not in self.model_dictionary and not self.limit_ram_use: + if coin not in self.model_dictionary: self.model_dictionary[coin] = model if 
self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: From c76afc255a838a9d7603b7bdef97ded54e3d3bf9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:26:11 +0100 Subject: [PATCH 132/232] explain how to choose environments, and how to customize them --- docs/freqai-reinforcement-learning.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 77cc38cba..e7c3576fc 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -214,3 +214,13 @@ tensorboard --logdir user_data/models/unique-id where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). ![tensorboard](assets/tensorboard.jpg) + + +### Choosing a base environment + +FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: + +* the actions available in the `calculate_reward` +* the actions consumed by the user strategy + +Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. From c8d3e5771235598b51cfe969cdf67866cbe01612 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:30:56 +0100 Subject: [PATCH 133/232] add note that these environments are designed for short-long bots only. --- docs/freqai-reinforcement-learning.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index e7c3576fc..c4e70130b 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -224,3 +224,6 @@ FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5Action * the actions consumed by the user strategy Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. 
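To illustrate the "override any function inside your `MyRLEnv`" path described above, the following sketch overrides `is_tradesignal()` (keeping the signature shown in the `Base5ActionRLEnv` diff earlier in this series). The class name `MyFilteredRLModel` and the filtering rule are hypothetical, chosen only to show the mechanism rather than a recommended trading constraint.

```python
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
from freqtrade.freqai.RL.BaseEnvironment import Positions


class MyFilteredRLModel(ReinforcementLearner):

    class MyRLEnv(Base5ActionRLEnv):
        def is_tradesignal(self, action: int) -> bool:
            # Purely illustrative filter: ignore short entries proposed while
            # the agent is not flat, otherwise defer to the parent logic.
            if action == Actions.Short_enter.value and self._position != Positions.Neutral:
                return False
            return super().is_tradesignal(action)
```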
+ +!!! Note + FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). From 6394ef45589d4724134c3e1f47f150dcc7a38ce4 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 17:43:52 +0100 Subject: [PATCH 134/232] fix docstrings --- .../RL/BaseReinforcementLearningModel.py | 19 ++++++++----------- .../prediction_models/ReinforcementLearner.py | 14 ++++++-------- .../ReinforcementLearner_multiproc.py | 7 +++---- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a8c79ce6e..d0ddce294 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -130,13 +130,12 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk: FreqaiDataKitchen): """ User can override this if they are using a custom MyRLEnv - :params: - data_dictionary: dict = common data dictionary containing train and test + :param data_dictionary: dict = common data dictionary containing train and test features/labels/weights. - prices_train/test: DataFrame = dataframe comprised of the prices to be used in the + :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in the environment during training or testing - dk: FreqaiDataKitchen = the datakitchen for the current pair + :param dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] @@ -229,10 +228,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk: FreqaiDataKitchen, model: Any) -> DataFrame: """ A helper function to make predictions in the Reinforcement learning module. - :params: - dataframe: DataFrame = the dataframe of features to make the predictions on - dk: FreqaiDatakitchen = data kitchen for the current pair - model: Any = the trained model used to inference the features. + :param dataframe: DataFrame = the dataframe of features to make the predictions on + :param dk: FreqaiDatakitchen = data kitchen for the current pair + :param model: Any = the trained model used to inference the features. """ output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list) @@ -322,9 +320,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. - :params: - action: int = The action made by the agent for the current candle. - :returns: + :param action: int = The action made by the agent for the current candle. 
+ :return: float = the reward to give to the agent for current step (used for optimization of weights in NN) """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 4bf990172..063af5ff5 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -20,12 +20,11 @@ class ReinforcementLearner(BaseReinforcementLearningModel): def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): """ User customizable fit method - :params: - data_dictionary: dict = common data dictionary containing all train/test + :param data_dictionary: dict = common data dictionary containing all train/test features/labels/weights. - dk: FreqaiDatakitchen = data kitchen for current pair. - :returns: - model: Any = trained model to be used for inference in dry/live/backtesting + :param dk: FreqaiDatakitchen = data kitchen for current pair. + :return: + model Any = trained model to be used for inference in dry/live/backtesting """ train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) @@ -69,9 +68,8 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. - :params: - action: int = The action made by the agent for the current candle. - :returns: + :param action: int = The action made by the agent for the current candle. + :return: float = the reward to give to the agent for current step (used for optimization of weights in NN) """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 41345b967..baba16066 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -61,13 +61,12 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): dk: FreqaiDataKitchen): """ User can override this if they are using a custom MyRLEnv - :params: - data_dictionary: dict = common data dictionary containing train and test + :param data_dictionary: dict = common data dictionary containing train and test features/labels/weights. 
- prices_train/test: DataFrame = dataframe comprised of the prices to be used in + :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in the environment during training or testing - dk: FreqaiDataKitchen = the datakitchen for the current pair + :param dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] From bf4d5b432a19a090c1e09eb499c92d56258a7004 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 13 Nov 2022 18:50:25 +0100 Subject: [PATCH 135/232] ensure model_type is defined --- freqtrade/freqai/data_drawer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 590439697..96b481074 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -99,6 +99,7 @@ class FreqaiDataDrawer: "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} if 'Reinforcement' in self.config['freqaimodel']: + self.model_type = 'stable_baselines' logger.warning('User passed a ReinforcementLearner model, FreqAI will ' 'now use stable_baselines3 to save models.') else: From 659c8c237f7a7e30ad0929fed448c449a01fb2bf Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 14 Nov 2022 20:27:45 -0700 Subject: [PATCH 136/232] initial revision --- freqtrade/rpc/api_server/api_ws.py | 170 +++++--- freqtrade/rpc/api_server/deps.py | 4 +- freqtrade/rpc/api_server/webserver.py | 162 +++++--- freqtrade/rpc/api_server/ws/__init__.py | 3 +- freqtrade/rpc/api_server/ws/channel.py | 365 ++++++++++++------ freqtrade/rpc/api_server/ws/message_stream.py | 23 ++ freqtrade/rpc/api_server/ws/serializer.py | 8 +- 7 files changed, 494 insertions(+), 241 deletions(-) create mode 100644 freqtrade/rpc/api_server/ws/message_stream.py diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 785773b39..a9b88aadb 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -1,16 +1,17 @@ +import asyncio import logging from typing import Any, Dict -from fastapi import APIRouter, Depends, WebSocketDisconnect -from fastapi.websockets import WebSocket, WebSocketState +from fastapi import APIRouter, Depends +from fastapi.websockets import WebSocket, WebSocketDisconnect from pydantic import ValidationError -from websockets.exceptions import WebSocketException +from websockets.exceptions import ConnectionClosed from freqtrade.enums import RPCMessageType, RPCRequestType from freqtrade.rpc.api_server.api_auth import validate_ws_token -from freqtrade.rpc.api_server.deps import get_channel_manager, get_rpc +from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc from freqtrade.rpc.api_server.ws import WebSocketChannel -from freqtrade.rpc.api_server.ws.channel import ChannelManager +from freqtrade.rpc.api_server.ws.message_stream import MessageStream from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema, WSRequestSchema, WSWhitelistMessage) from freqtrade.rpc.rpc import RPC @@ -22,23 +23,63 @@ logger = logging.getLogger(__name__) router = APIRouter() -async def is_websocket_alive(ws: WebSocket) -> bool: +# async def is_websocket_alive(ws: WebSocket) -> bool: +# """ +# Check if a FastAPI Websocket is still open +# """ +# if ( +# ws.application_state == WebSocketState.CONNECTED and +# ws.client_state == WebSocketState.CONNECTED +# ): +# return True +# return False + + +class 
WebSocketChannelClosed(Exception): """ - Check if a FastAPI Websocket is still open + General WebSocket exception to signal closing the channel """ - if ( - ws.application_state == WebSocketState.CONNECTED and - ws.client_state == WebSocketState.CONNECTED + pass + + +async def channel_reader(channel: WebSocketChannel, rpc: RPC): + """ + Iterate over the messages from the channel and process the request + """ + try: + async for message in channel: + await _process_consumer_request(message, channel, rpc) + except ( + RuntimeError, + WebSocketDisconnect, + ConnectionClosed ): - return True - return False + raise WebSocketChannelClosed + except asyncio.CancelledError: + return + + +async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream): + """ + Iterate over messages in the message stream and send them + """ + try: + async for message in message_stream: + await channel.send(message) + except ( + RuntimeError, + WebSocketDisconnect, + ConnectionClosed + ): + raise WebSocketChannelClosed + except asyncio.CancelledError: + return async def _process_consumer_request( request: Dict[str, Any], channel: WebSocketChannel, - rpc: RPC, - channel_manager: ChannelManager + rpc: RPC ): """ Validate and handle a request from a websocket consumer @@ -75,7 +116,7 @@ async def _process_consumer_request( # Format response response = WSWhitelistMessage(data=whitelist) # Send it back - await channel_manager.send_direct(channel, response.dict(exclude_none=True)) + await channel.send(response.dict(exclude_none=True)) elif type == RPCRequestType.ANALYZED_DF: limit = None @@ -86,53 +127,76 @@ async def _process_consumer_request( # For every pair in the generator, send a separate message for message in rpc._ws_request_analyzed_df(limit): + # Format response response = WSAnalyzedDFMessage(data=message) - await channel_manager.send_direct(channel, response.dict(exclude_none=True)) + await channel.send(response.dict(exclude_none=True)) @router.websocket("/message/ws") async def message_endpoint( - ws: WebSocket, + websocket: WebSocket, + token: str = Depends(validate_ws_token), rpc: RPC = Depends(get_rpc), - channel_manager=Depends(get_channel_manager), - token: str = Depends(validate_ws_token) + message_stream: MessageStream = Depends(get_message_stream) ): - """ - Message WebSocket endpoint, facilitates sending RPC messages - """ - try: - channel = await channel_manager.on_connect(ws) - if await is_websocket_alive(ws): + async with WebSocketChannel(websocket).connect() as channel: + try: + logger.info(f"Channel connected - {channel}") - logger.info(f"Consumer connected - {channel}") + channel_tasks = asyncio.gather( + channel_reader(channel, rpc), + channel_broadcaster(channel, message_stream) + ) + await channel_tasks - # Keep connection open until explicitly closed, and process requests - try: - while not channel.is_closed(): - request = await channel.recv() + finally: + logger.info(f"Channel disconnected - {channel}") + channel_tasks.cancel() - # Process the request here - await _process_consumer_request(request, channel, rpc, channel_manager) - except (WebSocketDisconnect, WebSocketException): - # Handle client disconnects - logger.info(f"Consumer disconnected - {channel}") - except RuntimeError: - # Handle cases like - - # RuntimeError('Cannot call "send" once a closed message has been sent') - pass - except Exception as e: - logger.info(f"Consumer connection failed - {channel}: {e}") - logger.debug(e, exc_info=e) +# @router.websocket("/message/ws") +# async def message_endpoint( +# 
ws: WebSocket, +# rpc: RPC = Depends(get_rpc), +# channel_manager=Depends(get_channel_manager), +# token: str = Depends(validate_ws_token) +# ): +# """ +# Message WebSocket endpoint, facilitates sending RPC messages +# """ +# try: +# channel = await channel_manager.on_connect(ws) +# if await is_websocket_alive(ws): - except RuntimeError: - # WebSocket was closed - # Do nothing - pass - except Exception as e: - logger.error(f"Failed to serve - {ws.client}") - # Log tracebacks to keep track of what errors are happening - logger.exception(e) - finally: - if channel: - await channel_manager.on_disconnect(ws) +# logger.info(f"Consumer connected - {channel}") + +# # Keep connection open until explicitly closed, and process requests +# try: +# while not channel.is_closed(): +# request = await channel.recv() + +# # Process the request here +# await _process_consumer_request(request, channel, rpc, channel_manager) + +# except (WebSocketDisconnect, WebSocketException): +# # Handle client disconnects +# logger.info(f"Consumer disconnected - {channel}") +# except RuntimeError: +# # Handle cases like - +# # RuntimeError('Cannot call "send" once a closed message has been sent') +# pass +# except Exception as e: +# logger.info(f"Consumer connection failed - {channel}: {e}") +# logger.debug(e, exc_info=e) + +# except RuntimeError: +# # WebSocket was closed +# # Do nothing +# pass +# except Exception as e: +# logger.error(f"Failed to serve - {ws.client}") +# # Log tracebacks to keep track of what errors are happening +# logger.exception(e) +# finally: +# if channel: +# await channel_manager.on_disconnect(ws) diff --git a/freqtrade/rpc/api_server/deps.py b/freqtrade/rpc/api_server/deps.py index abd3db036..aed97367b 100644 --- a/freqtrade/rpc/api_server/deps.py +++ b/freqtrade/rpc/api_server/deps.py @@ -41,8 +41,8 @@ def get_exchange(config=Depends(get_config)): return ApiServer._exchange -def get_channel_manager(): - return ApiServer._ws_channel_manager +def get_message_stream(): + return ApiServer._message_stream def is_webserver_mode(config=Depends(get_config)): diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index e9a12e4df..7e2c3f39f 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -1,7 +1,6 @@ import asyncio import logging from ipaddress import IPv4Address -from threading import Thread from typing import Any, Dict import orjson @@ -15,7 +14,7 @@ from starlette.responses import JSONResponse from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.rpc.api_server.uvicorn_threaded import UvicornServer -from freqtrade.rpc.api_server.ws import ChannelManager +from freqtrade.rpc.api_server.ws.message_stream import MessageStream from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType from freqtrade.rpc.rpc import RPC, RPCException, RPCHandler @@ -51,9 +50,10 @@ class ApiServer(RPCHandler): # Exchange - only available in webserver mode. 
_exchange = None # websocket message queue stuff - _ws_channel_manager = None - _ws_thread = None - _ws_loop = None + # _ws_channel_manager = None + # _ws_thread = None + # _ws_loop = None + _message_stream = None def __new__(cls, *args, **kwargs): """ @@ -71,14 +71,15 @@ class ApiServer(RPCHandler): return self._standalone: bool = standalone self._server = None + self._ws_queue = None - self._ws_background_task = None + self._ws_publisher_task = None ApiServer.__initialized = True api_config = self._config['api_server'] - ApiServer._ws_channel_manager = ChannelManager() + # ApiServer._ws_channel_manager = ChannelManager() self.app = FastAPI(title="Freqtrade API", docs_url='/docs' if api_config.get('enable_openapi', False) else None, @@ -107,18 +108,18 @@ class ApiServer(RPCHandler): logger.info("Stopping API Server") self._server.cleanup() - if self._ws_thread and self._ws_loop: - logger.info("Stopping API Server background tasks") + # if self._ws_thread and self._ws_loop: + # logger.info("Stopping API Server background tasks") - if self._ws_background_task: - # Cancel the queue task - self._ws_background_task.cancel() + # if self._ws_background_task: + # # Cancel the queue task + # self._ws_background_task.cancel() - self._ws_thread.join() + # self._ws_thread.join() - self._ws_thread = None - self._ws_loop = None - self._ws_background_task = None + # self._ws_thread = None + # self._ws_loop = None + # self._ws_background_task = None @classmethod def shutdown(cls): @@ -170,51 +171,102 @@ class ApiServer(RPCHandler): ) app.add_exception_handler(RPCException, self.handle_rpc_exception) + app.add_event_handler( + event_type="startup", + func=self._api_startup_event + ) + app.add_event_handler( + event_type="shutdown", + func=self._api_shutdown_event + ) - def start_message_queue(self): - if self._ws_thread: - return + async def _api_startup_event(self): + if not ApiServer._message_stream: + ApiServer._message_stream = MessageStream() - # Create a new loop, as it'll be just for the background thread - self._ws_loop = asyncio.new_event_loop() + if not self._ws_queue: + self._ws_queue = ThreadedQueue() - # Start the thread - self._ws_thread = Thread(target=self._ws_loop.run_forever) - self._ws_thread.start() + if not self._ws_publisher_task: + self._ws_publisher_task = asyncio.create_task( + self._publish_messages() + ) - # Finally, submit the coro to the thread - self._ws_background_task = asyncio.run_coroutine_threadsafe( - self._broadcast_queue_data(), loop=self._ws_loop) + async def _api_shutdown_event(self): + if ApiServer._message_stream: + ApiServer._message_stream = None - async def _broadcast_queue_data(self): - # Instantiate the queue in this coroutine so it's attached to our loop - self._ws_queue = ThreadedQueue() - async_queue = self._ws_queue.async_q - - try: - while True: - logger.debug("Getting queue messages...") - # Get data from queue - message: WSMessageSchemaType = await async_queue.get() - logger.debug(f"Found message of type: {message.get('type')}") - async_queue.task_done() - # Broadcast it - await self._ws_channel_manager.broadcast(message) - except asyncio.CancelledError: - pass - - # For testing, shouldn't happen when stable - except Exception as e: - logger.exception(f"Exception happened in background task: {e}") - - finally: - # Disconnect channels and stop the loop on cancel - await self._ws_channel_manager.disconnect_all() - self._ws_loop.stop() - # Avoid adding more items to the queue if they aren't - # going to get broadcasted. 
+ if self._ws_queue: self._ws_queue = None + if self._ws_publisher_task: + self._ws_publisher_task.cancel() + + async def _publish_messages(self): + """ + Background task that reads messages from the queue and adds them + to the message stream + """ + try: + async_queue = self._ws_queue.async_q + message_stream = ApiServer._message_stream + + while message_stream: + message: WSMessageSchemaType = await async_queue.get() + message_stream.publish(message) + + # Make sure to throttle how fast we + # publish messages as some clients will be + # slower than others + await asyncio.sleep(0.01) + async_queue.task_done() + finally: + self._ws_queue = None + + # def start_message_queue(self): + # if self._ws_thread: + # return + + # # Create a new loop, as it'll be just for the background thread + # self._ws_loop = asyncio.new_event_loop() + + # # Start the thread + # self._ws_thread = Thread(target=self._ws_loop.run_forever) + # self._ws_thread.start() + + # # Finally, submit the coro to the thread + # self._ws_background_task = asyncio.run_coroutine_threadsafe( + # self._broadcast_queue_data(), loop=self._ws_loop) + + # async def _broadcast_queue_data(self): + # # Instantiate the queue in this coroutine so it's attached to our loop + # self._ws_queue = ThreadedQueue() + # async_queue = self._ws_queue.async_q + + # try: + # while True: + # logger.debug("Getting queue messages...") + # # Get data from queue + # message: WSMessageSchemaType = await async_queue.get() + # logger.debug(f"Found message of type: {message.get('type')}") + # async_queue.task_done() + # # Broadcast it + # await self._ws_channel_manager.broadcast(message) + # except asyncio.CancelledError: + # pass + + # # For testing, shouldn't happen when stable + # except Exception as e: + # logger.exception(f"Exception happened in background task: {e}") + + # finally: + # # Disconnect channels and stop the loop on cancel + # await self._ws_channel_manager.disconnect_all() + # self._ws_loop.stop() + # # Avoid adding more items to the queue if they aren't + # # going to get broadcasted. + # self._ws_queue = None + def start_api(self): """ Start API ... should be run in thread. 
@@ -253,7 +305,7 @@ class ApiServer(RPCHandler): if self._standalone: self._server.run() else: - self.start_message_queue() + # self.start_message_queue() self._server.run_in_thread() except Exception: logger.exception("Api server failed to start.") diff --git a/freqtrade/rpc/api_server/ws/__init__.py b/freqtrade/rpc/api_server/ws/__init__.py index 055b20a9d..0b94d3fee 100644 --- a/freqtrade/rpc/api_server/ws/__init__.py +++ b/freqtrade/rpc/api_server/ws/__init__.py @@ -3,4 +3,5 @@ from freqtrade.rpc.api_server.ws.types import WebSocketType from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy from freqtrade.rpc.api_server.ws.serializer import HybridJSONWebSocketSerializer -from freqtrade.rpc.api_server.ws.channel import ChannelManager, WebSocketChannel +from freqtrade.rpc.api_server.ws.channel import WebSocketChannel +from freqtrade.rpc.api_server.ws.message_stream import MessageStream diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 88b4db9ba..b98bd13c9 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -1,12 +1,9 @@ import asyncio import logging -import time -from threading import RLock +from contextlib import asynccontextmanager from typing import Any, Dict, List, Optional, Type, Union from uuid import uuid4 -from fastapi import WebSocket as FastAPIWebSocket - from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy from freqtrade.rpc.api_server.ws.serializer import (HybridJSONWebSocketSerializer, WebSocketSerializer) @@ -21,32 +18,21 @@ class WebSocketChannel: """ Object to help facilitate managing a websocket connection """ - def __init__( self, websocket: WebSocketType, channel_id: Optional[str] = None, - drain_timeout: int = 3, - throttle: float = 0.01, serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer ): - self.channel_id = channel_id if channel_id else uuid4().hex[:8] - - # The WebSocket object self._websocket = WebSocketProxy(websocket) - self.drain_timeout = drain_timeout - self.throttle = throttle - - self._subscriptions: List[str] = [] - # 32 is the size of the receiving queue in websockets package - self.queue: asyncio.Queue[Dict[str, Any]] = asyncio.Queue(maxsize=32) - self._relay_task = asyncio.create_task(self.relay()) - # Internal event to signify a closed websocket self._closed = asyncio.Event() + # Throttle how fast we send messages + self._throttle = 0.01 + # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) @@ -61,40 +47,16 @@ class WebSocketChannel: def remote_addr(self): return self._websocket.remote_addr - async def _send(self, data): + async def send(self, message: Union[WSMessageSchemaType, Dict[str, Any]]): """ - Send data on the wrapped websocket + Send a message on the wrapped websocket """ - await self._wrapped_ws.send(data) - - async def send(self, data) -> bool: - """ - Add the data to the queue to be sent. 
- :returns: True if data added to queue, False otherwise - """ - - # This block only runs if the queue is full, it will wait - # until self.drain_timeout for the relay to drain the outgoing queue - # We can't use asyncio.wait_for here because the queue may have been created with a - # different eventloop - start = time.time() - while self.queue.full(): - await asyncio.sleep(1) - if (time.time() - start) > self.drain_timeout: - return False - - # If for some reason the queue is still full, just return False - try: - self.queue.put_nowait(data) - except asyncio.QueueFull: - return False - - # If we got here everything is ok - return True + await asyncio.sleep(self._throttle) + await self._wrapped_ws.send(message) async def recv(self): """ - Receive data on the wrapped websocket + Receive a message on the wrapped websocket """ return await self._wrapped_ws.recv() @@ -104,18 +66,23 @@ class WebSocketChannel: """ return await self._websocket.ping() + async def accept(self): + """ + Accept the underlying websocket connection + """ + return await self._websocket.accept() + async def close(self): """ Close the WebSocketChannel """ try: - await self.raw_websocket.close() + await self._websocket.close() except Exception: pass self._closed.set() - self._relay_task.cancel() def is_closed(self) -> bool: """ @@ -139,99 +106,243 @@ class WebSocketChannel: """ return message_type in self._subscriptions - async def relay(self): + async def __aiter__(self): """ - Relay messages from the channel's queue and send them out. This is started - as a task. + Generator for received messages """ - while not self._closed.is_set(): - message = await self.queue.get() + while True: try: - await self._send(message) - self.queue.task_done() + yield await self.recv() + except Exception: + break - # Limit messages per sec. - # Could cause problems with queue size if too low, and - # problems with network traffik if too high. 
- # 0.01 = 100/s - await asyncio.sleep(self.throttle) - except RuntimeError: - # The connection was closed, just exit the task - return - - -class ChannelManager: - def __init__(self): - self.channels = dict() - self._lock = RLock() # Re-entrant Lock - - async def on_connect(self, websocket: WebSocketType): + @asynccontextmanager + async def connect(self): """ - Wrap websocket connection into Channel and add to list - - :param websocket: The WebSocket object to attach to the Channel + Context manager for safely opening and closing the websocket connection """ - if isinstance(websocket, FastAPIWebSocket): - try: - await websocket.accept() - except RuntimeError: - # The connection was closed before we could accept it - return + try: + await self.accept() + yield self + finally: + await self.close() - ws_channel = WebSocketChannel(websocket) - with self._lock: - self.channels[websocket] = ws_channel +# class WebSocketChannel: +# """ +# Object to help facilitate managing a websocket connection +# """ - return ws_channel +# def __init__( +# self, +# websocket: WebSocketType, +# channel_id: Optional[str] = None, +# drain_timeout: int = 3, +# throttle: float = 0.01, +# serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer +# ): - async def on_disconnect(self, websocket: WebSocketType): - """ - Call close on the channel if it's not, and remove from channel list +# self.channel_id = channel_id if channel_id else uuid4().hex[:8] - :param websocket: The WebSocket objet attached to the Channel - """ - with self._lock: - channel = self.channels.get(websocket) - if channel: - logger.info(f"Disconnecting channel {channel}") - if not channel.is_closed(): - await channel.close() +# # The WebSocket object +# self._websocket = WebSocketProxy(websocket) - del self.channels[websocket] +# self.drain_timeout = drain_timeout +# self.throttle = throttle - async def disconnect_all(self): - """ - Disconnect all Channels - """ - with self._lock: - for websocket in self.channels.copy().keys(): - await self.on_disconnect(websocket) +# self._subscriptions: List[str] = [] +# # 32 is the size of the receiving queue in websockets package +# self.queue: asyncio.Queue[Dict[str, Any]] = asyncio.Queue(maxsize=32) +# self._relay_task = asyncio.create_task(self.relay()) - async def broadcast(self, message: WSMessageSchemaType): - """ - Broadcast a message on all Channels +# # Internal event to signify a closed websocket +# self._closed = asyncio.Event() - :param message: The message to send - """ - with self._lock: - for channel in self.channels.copy().values(): - if channel.subscribed_to(message.get('type')): - await self.send_direct(channel, message) +# # Wrap the WebSocket in the Serializing class +# self._wrapped_ws = serializer_cls(self._websocket) - async def send_direct( - self, channel: WebSocketChannel, message: Union[WSMessageSchemaType, Dict[str, Any]]): - """ - Send a message directly through direct_channel only +# def __repr__(self): +# return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" - :param direct_channel: The WebSocketChannel object to send the message through - :param message: The message to send - """ - if not await channel.send(message): - await self.on_disconnect(channel.raw_websocket) +# @property +# def raw_websocket(self): +# return self._websocket.raw_websocket - def has_channels(self): - """ - Flag for more than 0 channels - """ - return len(self.channels) > 0 +# @property +# def remote_addr(self): +# return self._websocket.remote_addr + +# async def _send(self, 
data): +# """ +# Send data on the wrapped websocket +# """ +# await self._wrapped_ws.send(data) + +# async def send(self, data) -> bool: +# """ +# Add the data to the queue to be sent. +# :returns: True if data added to queue, False otherwise +# """ + +# # This block only runs if the queue is full, it will wait +# # until self.drain_timeout for the relay to drain the outgoing queue +# # We can't use asyncio.wait_for here because the queue may have been created with a +# # different eventloop +# start = time.time() +# while self.queue.full(): +# await asyncio.sleep(1) +# if (time.time() - start) > self.drain_timeout: +# return False + +# # If for some reason the queue is still full, just return False +# try: +# self.queue.put_nowait(data) +# except asyncio.QueueFull: +# return False + +# # If we got here everything is ok +# return True + +# async def recv(self): +# """ +# Receive data on the wrapped websocket +# """ +# return await self._wrapped_ws.recv() + +# async def ping(self): +# """ +# Ping the websocket +# """ +# return await self._websocket.ping() + +# async def close(self): +# """ +# Close the WebSocketChannel +# """ + +# try: +# await self.raw_websocket.close() +# except Exception: +# pass + +# self._closed.set() +# self._relay_task.cancel() + +# def is_closed(self) -> bool: +# """ +# Closed flag +# """ +# return self._closed.is_set() + +# def set_subscriptions(self, subscriptions: List[str] = []) -> None: +# """ +# Set which subscriptions this channel is subscribed to + +# :param subscriptions: List of subscriptions, List[str] +# """ +# self._subscriptions = subscriptions + +# def subscribed_to(self, message_type: str) -> bool: +# """ +# Check if this channel is subscribed to the message_type + +# :param message_type: The message type to check +# """ +# return message_type in self._subscriptions + +# async def relay(self): +# """ +# Relay messages from the channel's queue and send them out. This is started +# as a task. +# """ +# while not self._closed.is_set(): +# message = await self.queue.get() +# try: +# await self._send(message) +# self.queue.task_done() + +# # Limit messages per sec. +# # Could cause problems with queue size if too low, and +# # problems with network traffik if too high. 
+# # 0.01 = 100/s +# await asyncio.sleep(self.throttle) +# except RuntimeError: +# # The connection was closed, just exit the task +# return + + +# class ChannelManager: +# def __init__(self): +# self.channels = dict() +# self._lock = RLock() # Re-entrant Lock + +# async def on_connect(self, websocket: WebSocketType): +# """ +# Wrap websocket connection into Channel and add to list + +# :param websocket: The WebSocket object to attach to the Channel +# """ +# if isinstance(websocket, FastAPIWebSocket): +# try: +# await websocket.accept() +# except RuntimeError: +# # The connection was closed before we could accept it +# return + +# ws_channel = WebSocketChannel(websocket) + +# with self._lock: +# self.channels[websocket] = ws_channel + +# return ws_channel + +# async def on_disconnect(self, websocket: WebSocketType): +# """ +# Call close on the channel if it's not, and remove from channel list + +# :param websocket: The WebSocket objet attached to the Channel +# """ +# with self._lock: +# channel = self.channels.get(websocket) +# if channel: +# logger.info(f"Disconnecting channel {channel}") +# if not channel.is_closed(): +# await channel.close() + +# del self.channels[websocket] + +# async def disconnect_all(self): +# """ +# Disconnect all Channels +# """ +# with self._lock: +# for websocket in self.channels.copy().keys(): +# await self.on_disconnect(websocket) + +# async def broadcast(self, message: WSMessageSchemaType): +# """ +# Broadcast a message on all Channels + +# :param message: The message to send +# """ +# with self._lock: +# for channel in self.channels.copy().values(): +# if channel.subscribed_to(message.get('type')): +# await self.send_direct(channel, message) + +# async def send_direct( +# self, channel: WebSocketChannel, message: Union[WSMessageSchemaType, Dict[str, Any]]): +# """ +# Send a message directly through direct_channel only + +# :param direct_channel: The WebSocketChannel object to send the message through +# :param message: The message to send +# """ +# if not await channel.send(message): +# await self.on_disconnect(channel.raw_websocket) + +# def has_channels(self): +# """ +# Flag for more than 0 channels +# """ +# return len(self.channels) > 0 diff --git a/freqtrade/rpc/api_server/ws/message_stream.py b/freqtrade/rpc/api_server/ws/message_stream.py new file mode 100644 index 000000000..f77242719 --- /dev/null +++ b/freqtrade/rpc/api_server/ws/message_stream.py @@ -0,0 +1,23 @@ +import asyncio + + +class MessageStream: + """ + A message stream for consumers to subscribe to, + and for producers to publish to. 
+ """ + def __init__(self): + self._loop = asyncio.get_running_loop() + self._waiter = self._loop.create_future() + + def publish(self, message): + waiter, self._waiter = self._waiter, self._loop.create_future() + waiter.set_result((message, self._waiter)) + + async def subscribe(self): + waiter = self._waiter + while True: + message, waiter = await waiter + yield message + + __aiter__ = subscribe diff --git a/freqtrade/rpc/api_server/ws/serializer.py b/freqtrade/rpc/api_server/ws/serializer.py index 6c402a100..85703136b 100644 --- a/freqtrade/rpc/api_server/ws/serializer.py +++ b/freqtrade/rpc/api_server/ws/serializer.py @@ -1,5 +1,6 @@ import logging from abc import ABC, abstractmethod +from typing import Any, Dict, Union import orjson import rapidjson @@ -7,6 +8,7 @@ from pandas import DataFrame from freqtrade.misc import dataframe_to_json, json_to_dataframe from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy +from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType logger = logging.getLogger(__name__) @@ -24,7 +26,7 @@ class WebSocketSerializer(ABC): def _deserialize(self, data): raise NotImplementedError() - async def send(self, data: bytes): + async def send(self, data: Union[WSMessageSchemaType, Dict[str, Any]]): await self._websocket.send(self._serialize(data)) async def recv(self) -> bytes: @@ -32,8 +34,8 @@ class WebSocketSerializer(ABC): return self._deserialize(data) - async def close(self, code: int = 1000): - await self._websocket.close(code) + # async def close(self, code: int = 1000): + # await self._websocket.close(code) class HybridJSONWebSocketSerializer(WebSocketSerializer): From d713af045fbd51df67825836d9fe3a17f1424622 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 14 Nov 2022 22:21:40 -0700 Subject: [PATCH 137/232] remove main queue completely --- freqtrade/rpc/api_server/api_ws.py | 3 +- freqtrade/rpc/api_server/webserver.py | 47 ++------------------------ freqtrade/rpc/api_server/ws/channel.py | 5 ++- 3 files changed, 6 insertions(+), 49 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index a9b88aadb..3f207eac3 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -148,7 +148,8 @@ async def message_endpoint( channel_broadcaster(channel, message_stream) ) await channel_tasks - + except WebSocketChannelClosed: + pass finally: logger.info(f"Channel disconnected - {channel}") channel_tasks.cancel() diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index 7e2c3f39f..d0695e06d 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -1,4 +1,3 @@ -import asyncio import logging from ipaddress import IPv4Address from typing import Any, Dict @@ -7,15 +6,12 @@ import orjson import uvicorn from fastapi import Depends, FastAPI from fastapi.middleware.cors import CORSMiddleware -# Look into alternatives -from janus import Queue as ThreadedQueue from starlette.responses import JSONResponse from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.rpc.api_server.uvicorn_threaded import UvicornServer from freqtrade.rpc.api_server.ws.message_stream import MessageStream -from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType from freqtrade.rpc.rpc import RPC, RPCException, RPCHandler @@ -72,9 +68,6 @@ class ApiServer(RPCHandler): self._standalone: bool = standalone self._server = None - self._ws_queue = None - 
self._ws_publisher_task = None - ApiServer.__initialized = True api_config = self._config['api_server'] @@ -130,9 +123,8 @@ class ApiServer(RPCHandler): cls._rpc = None def send_msg(self, msg: Dict[str, Any]) -> None: - if self._ws_queue: - sync_q = self._ws_queue.sync_q - sync_q.put(msg) + if ApiServer._message_stream: + ApiServer._message_stream.publish(msg) def handle_rpc_exception(self, request, exc): logger.exception(f"API Error calling: {exc}") @@ -184,45 +176,10 @@ class ApiServer(RPCHandler): if not ApiServer._message_stream: ApiServer._message_stream = MessageStream() - if not self._ws_queue: - self._ws_queue = ThreadedQueue() - - if not self._ws_publisher_task: - self._ws_publisher_task = asyncio.create_task( - self._publish_messages() - ) - async def _api_shutdown_event(self): if ApiServer._message_stream: ApiServer._message_stream = None - if self._ws_queue: - self._ws_queue = None - - if self._ws_publisher_task: - self._ws_publisher_task.cancel() - - async def _publish_messages(self): - """ - Background task that reads messages from the queue and adds them - to the message stream - """ - try: - async_queue = self._ws_queue.async_q - message_stream = ApiServer._message_stream - - while message_stream: - message: WSMessageSchemaType = await async_queue.get() - message_stream.publish(message) - - # Make sure to throttle how fast we - # publish messages as some clients will be - # slower than others - await asyncio.sleep(0.01) - async_queue.task_done() - finally: - self._ws_queue = None - # def start_message_queue(self): # if self._ws_thread: # return diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index b98bd13c9..39c8db516 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -30,8 +30,8 @@ class WebSocketChannel: # Internal event to signify a closed websocket self._closed = asyncio.Event() - # Throttle how fast we send messages - self._throttle = 0.01 + # The subscribed message types + self._subscriptions: List[str] = [] # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) @@ -51,7 +51,6 @@ class WebSocketChannel: """ Send a message on the wrapped websocket """ - await asyncio.sleep(self._throttle) await self._wrapped_ws.send(message) async def recv(self): From 442467e8aed2ff639bfba04e7a2f6e175f774af1 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 14 Nov 2022 22:26:34 -0700 Subject: [PATCH 138/232] remove old comments and code --- freqtrade/rpc/api_server/api_ws.py | 60 ------ freqtrade/rpc/api_server/webserver.py | 75 ++------ freqtrade/rpc/api_server/ws/channel.py | 220 ---------------------- freqtrade/rpc/api_server/ws/serializer.py | 3 - 4 files changed, 12 insertions(+), 346 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 3f207eac3..01243b0cc 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -23,18 +23,6 @@ logger = logging.getLogger(__name__) router = APIRouter() -# async def is_websocket_alive(ws: WebSocket) -> bool: -# """ -# Check if a FastAPI Websocket is still open -# """ -# if ( -# ws.application_state == WebSocketState.CONNECTED and -# ws.client_state == WebSocketState.CONNECTED -# ): -# return True -# return False - - class WebSocketChannelClosed(Exception): """ General WebSocket exception to signal closing the channel @@ -153,51 +141,3 @@ async def message_endpoint( finally: logger.info(f"Channel disconnected - {channel}") 
channel_tasks.cancel() - - -# @router.websocket("/message/ws") -# async def message_endpoint( -# ws: WebSocket, -# rpc: RPC = Depends(get_rpc), -# channel_manager=Depends(get_channel_manager), -# token: str = Depends(validate_ws_token) -# ): -# """ -# Message WebSocket endpoint, facilitates sending RPC messages -# """ -# try: -# channel = await channel_manager.on_connect(ws) -# if await is_websocket_alive(ws): - -# logger.info(f"Consumer connected - {channel}") - -# # Keep connection open until explicitly closed, and process requests -# try: -# while not channel.is_closed(): -# request = await channel.recv() - -# # Process the request here -# await _process_consumer_request(request, channel, rpc, channel_manager) - -# except (WebSocketDisconnect, WebSocketException): -# # Handle client disconnects -# logger.info(f"Consumer disconnected - {channel}") -# except RuntimeError: -# # Handle cases like - -# # RuntimeError('Cannot call "send" once a closed message has been sent') -# pass -# except Exception as e: -# logger.info(f"Consumer connection failed - {channel}: {e}") -# logger.debug(e, exc_info=e) - -# except RuntimeError: -# # WebSocket was closed -# # Do nothing -# pass -# except Exception as e: -# logger.error(f"Failed to serve - {ws.client}") -# # Log tracebacks to keep track of what errors are happening -# logger.exception(e) -# finally: -# if channel: -# await channel_manager.on_disconnect(ws) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index d0695e06d..f100a46ef 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -45,10 +45,7 @@ class ApiServer(RPCHandler): _config: Config = {} # Exchange - only available in webserver mode. _exchange = None - # websocket message queue stuff - # _ws_channel_manager = None - # _ws_thread = None - # _ws_loop = None + # websocket message stuff _message_stream = None def __new__(cls, *args, **kwargs): @@ -72,8 +69,6 @@ class ApiServer(RPCHandler): api_config = self._config['api_server'] - # ApiServer._ws_channel_manager = ChannelManager() - self.app = FastAPI(title="Freqtrade API", docs_url='/docs' if api_config.get('enable_openapi', False) else None, redoc_url=None, @@ -101,19 +96,6 @@ class ApiServer(RPCHandler): logger.info("Stopping API Server") self._server.cleanup() - # if self._ws_thread and self._ws_loop: - # logger.info("Stopping API Server background tasks") - - # if self._ws_background_task: - # # Cancel the queue task - # self._ws_background_task.cancel() - - # self._ws_thread.join() - - # self._ws_thread = None - # self._ws_loop = None - # self._ws_background_task = None - @classmethod def shutdown(cls): cls.__initialized = False @@ -123,6 +105,9 @@ class ApiServer(RPCHandler): cls._rpc = None def send_msg(self, msg: Dict[str, Any]) -> None: + """ + Publish the message to the message stream + """ if ApiServer._message_stream: ApiServer._message_stream.publish(msg) @@ -173,57 +158,21 @@ class ApiServer(RPCHandler): ) async def _api_startup_event(self): + """ + Creates the MessageStream class on startup + so it has access to the same event loop + as uvicorn + """ if not ApiServer._message_stream: ApiServer._message_stream = MessageStream() async def _api_shutdown_event(self): + """ + Removes the MessageStream class on shutdown + """ if ApiServer._message_stream: ApiServer._message_stream = None - # def start_message_queue(self): - # if self._ws_thread: - # return - - # # Create a new loop, as it'll be just for the background thread - # self._ws_loop = 
asyncio.new_event_loop() - - # # Start the thread - # self._ws_thread = Thread(target=self._ws_loop.run_forever) - # self._ws_thread.start() - - # # Finally, submit the coro to the thread - # self._ws_background_task = asyncio.run_coroutine_threadsafe( - # self._broadcast_queue_data(), loop=self._ws_loop) - - # async def _broadcast_queue_data(self): - # # Instantiate the queue in this coroutine so it's attached to our loop - # self._ws_queue = ThreadedQueue() - # async_queue = self._ws_queue.async_q - - # try: - # while True: - # logger.debug("Getting queue messages...") - # # Get data from queue - # message: WSMessageSchemaType = await async_queue.get() - # logger.debug(f"Found message of type: {message.get('type')}") - # async_queue.task_done() - # # Broadcast it - # await self._ws_channel_manager.broadcast(message) - # except asyncio.CancelledError: - # pass - - # # For testing, shouldn't happen when stable - # except Exception as e: - # logger.exception(f"Exception happened in background task: {e}") - - # finally: - # # Disconnect channels and stop the loop on cancel - # await self._ws_channel_manager.disconnect_all() - # self._ws_loop.stop() - # # Avoid adding more items to the queue if they aren't - # # going to get broadcasted. - # self._ws_queue = None - def start_api(self): """ Start API ... should be run in thread. diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 39c8db516..ee16a95c6 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -125,223 +125,3 @@ class WebSocketChannel: yield self finally: await self.close() - - -# class WebSocketChannel: -# """ -# Object to help facilitate managing a websocket connection -# """ - -# def __init__( -# self, -# websocket: WebSocketType, -# channel_id: Optional[str] = None, -# drain_timeout: int = 3, -# throttle: float = 0.01, -# serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer -# ): - -# self.channel_id = channel_id if channel_id else uuid4().hex[:8] - -# # The WebSocket object -# self._websocket = WebSocketProxy(websocket) - -# self.drain_timeout = drain_timeout -# self.throttle = throttle - -# self._subscriptions: List[str] = [] -# # 32 is the size of the receiving queue in websockets package -# self.queue: asyncio.Queue[Dict[str, Any]] = asyncio.Queue(maxsize=32) -# self._relay_task = asyncio.create_task(self.relay()) - -# # Internal event to signify a closed websocket -# self._closed = asyncio.Event() - -# # Wrap the WebSocket in the Serializing class -# self._wrapped_ws = serializer_cls(self._websocket) - -# def __repr__(self): -# return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" - -# @property -# def raw_websocket(self): -# return self._websocket.raw_websocket - -# @property -# def remote_addr(self): -# return self._websocket.remote_addr - -# async def _send(self, data): -# """ -# Send data on the wrapped websocket -# """ -# await self._wrapped_ws.send(data) - -# async def send(self, data) -> bool: -# """ -# Add the data to the queue to be sent. 
-# :returns: True if data added to queue, False otherwise -# """ - -# # This block only runs if the queue is full, it will wait -# # until self.drain_timeout for the relay to drain the outgoing queue -# # We can't use asyncio.wait_for here because the queue may have been created with a -# # different eventloop -# start = time.time() -# while self.queue.full(): -# await asyncio.sleep(1) -# if (time.time() - start) > self.drain_timeout: -# return False - -# # If for some reason the queue is still full, just return False -# try: -# self.queue.put_nowait(data) -# except asyncio.QueueFull: -# return False - -# # If we got here everything is ok -# return True - -# async def recv(self): -# """ -# Receive data on the wrapped websocket -# """ -# return await self._wrapped_ws.recv() - -# async def ping(self): -# """ -# Ping the websocket -# """ -# return await self._websocket.ping() - -# async def close(self): -# """ -# Close the WebSocketChannel -# """ - -# try: -# await self.raw_websocket.close() -# except Exception: -# pass - -# self._closed.set() -# self._relay_task.cancel() - -# def is_closed(self) -> bool: -# """ -# Closed flag -# """ -# return self._closed.is_set() - -# def set_subscriptions(self, subscriptions: List[str] = []) -> None: -# """ -# Set which subscriptions this channel is subscribed to - -# :param subscriptions: List of subscriptions, List[str] -# """ -# self._subscriptions = subscriptions - -# def subscribed_to(self, message_type: str) -> bool: -# """ -# Check if this channel is subscribed to the message_type - -# :param message_type: The message type to check -# """ -# return message_type in self._subscriptions - -# async def relay(self): -# """ -# Relay messages from the channel's queue and send them out. This is started -# as a task. -# """ -# while not self._closed.is_set(): -# message = await self.queue.get() -# try: -# await self._send(message) -# self.queue.task_done() - -# # Limit messages per sec. -# # Could cause problems with queue size if too low, and -# # problems with network traffik if too high. 
-# # 0.01 = 100/s -# await asyncio.sleep(self.throttle) -# except RuntimeError: -# # The connection was closed, just exit the task -# return - - -# class ChannelManager: -# def __init__(self): -# self.channels = dict() -# self._lock = RLock() # Re-entrant Lock - -# async def on_connect(self, websocket: WebSocketType): -# """ -# Wrap websocket connection into Channel and add to list - -# :param websocket: The WebSocket object to attach to the Channel -# """ -# if isinstance(websocket, FastAPIWebSocket): -# try: -# await websocket.accept() -# except RuntimeError: -# # The connection was closed before we could accept it -# return - -# ws_channel = WebSocketChannel(websocket) - -# with self._lock: -# self.channels[websocket] = ws_channel - -# return ws_channel - -# async def on_disconnect(self, websocket: WebSocketType): -# """ -# Call close on the channel if it's not, and remove from channel list - -# :param websocket: The WebSocket objet attached to the Channel -# """ -# with self._lock: -# channel = self.channels.get(websocket) -# if channel: -# logger.info(f"Disconnecting channel {channel}") -# if not channel.is_closed(): -# await channel.close() - -# del self.channels[websocket] - -# async def disconnect_all(self): -# """ -# Disconnect all Channels -# """ -# with self._lock: -# for websocket in self.channels.copy().keys(): -# await self.on_disconnect(websocket) - -# async def broadcast(self, message: WSMessageSchemaType): -# """ -# Broadcast a message on all Channels - -# :param message: The message to send -# """ -# with self._lock: -# for channel in self.channels.copy().values(): -# if channel.subscribed_to(message.get('type')): -# await self.send_direct(channel, message) - -# async def send_direct( -# self, channel: WebSocketChannel, message: Union[WSMessageSchemaType, Dict[str, Any]]): -# """ -# Send a message directly through direct_channel only - -# :param direct_channel: The WebSocketChannel object to send the message through -# :param message: The message to send -# """ -# if not await channel.send(message): -# await self.on_disconnect(channel.raw_websocket) - -# def has_channels(self): -# """ -# Flag for more than 0 channels -# """ -# return len(self.channels) > 0 diff --git a/freqtrade/rpc/api_server/ws/serializer.py b/freqtrade/rpc/api_server/ws/serializer.py index 85703136b..625a0990c 100644 --- a/freqtrade/rpc/api_server/ws/serializer.py +++ b/freqtrade/rpc/api_server/ws/serializer.py @@ -34,9 +34,6 @@ class WebSocketSerializer(ABC): return self._deserialize(data) - # async def close(self, code: int = 1000): - # await self._websocket.close(code) - class HybridJSONWebSocketSerializer(WebSocketSerializer): def _serialize(self, data) -> str: From 6a1655c047b88bba462677566f6943819ffc83a7 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 15 Nov 2022 22:26:54 -0700 Subject: [PATCH 139/232] support ssl connections in emc --- freqtrade/constants.py | 1 + freqtrade/rpc/external_message_consumer.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 534d06fd4..cfac98ebd 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -512,6 +512,7 @@ CONF_SCHEMA = { 'minimum': 0, 'maximum': 65535 }, + 'secure': {'type': 'boolean', 'default': False}, 'ws_token': {'type': 'string'}, }, 'required': ['name', 'host', 'ws_token'] diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index b978407e4..d9aed7d52 100644 --- a/freqtrade/rpc/external_message_consumer.py 
+++ b/freqtrade/rpc/external_message_consumer.py @@ -31,6 +31,7 @@ class Producer(TypedDict): name: str host: str port: int + secure: bool ws_token: str @@ -180,7 +181,8 @@ class ExternalMessageConsumer: host, port = producer['host'], producer['port'] token = producer['ws_token'] name = producer['name'] - ws_url = f"ws://{host}:{port}/api/v1/message/ws?token={token}" + scheme = 'wss' if producer['secure'] else 'ws' + ws_url = f"{scheme}://{host}:{port}/api/v1/message/ws?token={token}" # This will raise InvalidURI if the url is bad async with websockets.connect( From 86e094e39b77e9c6ea8b8e88fa7a84339be151e0 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 15 Nov 2022 22:33:42 -0700 Subject: [PATCH 140/232] update docs --- docs/producer-consumer.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/producer-consumer.md b/docs/producer-consumer.md index b69406edf..88e34d0d6 100644 --- a/docs/producer-consumer.md +++ b/docs/producer-consumer.md @@ -21,6 +21,7 @@ Enable subscribing to an instance by adding the `external_message_consumer` sect "name": "default", // This can be any name you'd like, default is "default" "host": "127.0.0.1", // The host from your producer's api_server config "port": 8080, // The port from your producer's api_server config + "secure": false, // Use a secure websockets connection, default false "ws_token": "sercet_Ws_t0ken" // The ws_token from your producer's api_server config } ], @@ -42,6 +43,7 @@ Enable subscribing to an instance by adding the `external_message_consumer` sect | `producers.name` | **Required.** Name of this producer. This name must be used in calls to `get_producer_pairs()` and `get_producer_df()` if more than one producer is used.
**Datatype:** string | `producers.host` | **Required.** The hostname or IP address from your producer.
**Datatype:** string | `producers.port` | **Required.** The port matching the above host.
**Datatype:** string
+| `producers.secure` | **Optional.** Use a secure websockets connection (SSL). Default: `False`.
**Datatype:** Boolean.
| `producers.ws_token` | **Required.** `ws_token` as configured on the producer.
**Datatype:** string | | **Optional settings** | `wait_timeout` | Timeout until we ping again if no message is received.
*Defaults to `300`.*
**Datatype:** Integer - in seconds. From 1380ddd066c46784ea0ffd518f919ef8da7972f5 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 15 Nov 2022 22:44:02 -0700 Subject: [PATCH 141/232] update ws client --- scripts/ws_client.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/ws_client.py b/scripts/ws_client.py index 090039cde..70dbead14 100644 --- a/scripts/ws_client.py +++ b/scripts/ws_client.py @@ -199,6 +199,7 @@ async def create_client( host, port, token, + scheme='ws', name='default', protocol=ClientProtocol(), sleep_time=10, @@ -211,13 +212,14 @@ async def create_client( :param host: The host :param port: The port :param token: The websocket auth token + :param scheme: `ws` for most connections, `wss` for ssl :param name: The name of the producer :param **kwargs: Any extra kwargs passed to websockets.connect """ while 1: try: - websocket_url = f"ws://{host}:{port}/api/v1/message/ws?token={token}" + websocket_url = f"{scheme}://{host}:{port}/api/v1/message/ws?token={token}" logger.info(f"Attempting to connect to {name} @ {host}:{port}") async with websockets.connect(websocket_url, **kwargs) as ws: @@ -304,6 +306,7 @@ async def _main(args): producer['host'], producer['port'], producer['ws_token'], + 'wss' if producer['secure'] else 'ws', producer['name'], sleep_time=sleep_time, ping_timeout=ping_timeout, From a993cb512de6422d0c186e27c214502a5356c5eb Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Thu, 17 Nov 2022 10:22:55 -0700 Subject: [PATCH 142/232] change to get call in ws_client --- scripts/ws_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ws_client.py b/scripts/ws_client.py index 70dbead14..5d27f512e 100644 --- a/scripts/ws_client.py +++ b/scripts/ws_client.py @@ -306,7 +306,7 @@ async def _main(args): producer['host'], producer['port'], producer['ws_token'], - 'wss' if producer['secure'] else 'ws', + 'wss' if producer.get('secure', False) else 'ws', producer['name'], sleep_time=sleep_time, ping_timeout=ping_timeout, From 60fcd8dce22024ea5cff3b48a5b17ff33bfc723e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 17 Nov 2022 21:50:02 +0100 Subject: [PATCH 143/232] fix skipped mac test, fix RL bug in add_state_info, fix use of __import__, revise doc --- docs/freqai-reinforcement-learning.md | 2 +- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 6 +++--- tests/freqai/test_freqai_interface.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index c4e70130b..b96c591de 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -20,7 +20,7 @@ With the current framework, we aim to expose the training environment via the co We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. -The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework (albeit with some basic assistance from core-dev). 
For the environment, it is inheriting from `gym.env` which means that a user would need to write an entirely new environment if they wish to switch to a different library. +The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. ## Running Reinforcement Learning diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index d0ddce294..629633814 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -1,3 +1,4 @@ +import importlib import logging from abc import abstractmethod from datetime import datetime, timezone @@ -58,8 +59,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): f'sb3_contrib. please choose one of {SB3_MODELS} or ' f'{SB3_CONTRIB_MODELS}') - mod = __import__(import_str, fromlist=[ - self.model_type]) + mod = importlib.import_module(import_str, self.model_type) self.MODELCLASS = getattr(mod, self.model_type) self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() @@ -236,7 +236,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def _predict(window): observations = dataframe.iloc[window.index] - if self.live and self.rl_config('add_state_info', False): + if self.live and self.rl_config.get('add_state_info', False): market_side, current_profit, trade_duration = self.get_state_info(dk.pair) observations['current_profit_pct'] = current_profit observations['position'] = market_side diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 08f33add9..3415c75ca 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -196,7 +196,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") - if is_mac(): + if is_mac() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") Trade.use_db = False From 61a859ba4c8462c4ae7785063f6ed1014e598764 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 18 Nov 2022 17:30:03 +0100 Subject: [PATCH 144/232] remove tensorboard req from rl reqs --- requirements-freqai-rl.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index 22e077241..b6bd7ef15 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -6,4 +6,3 @@ torch==1.12.1 stable-baselines3==1.6.1 gym==0.21 sb3-contrib==1.6.1 -tensorboard==2.10.1 From 0cb6f71c026bd2f771a862c43c5b2c744a64264e Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 18 Nov 2022 13:32:27 -0700 Subject: [PATCH 145/232] better error handling, true async sending, more readable api --- freqtrade/rpc/api_server/api_ws.py | 66 +++----------- freqtrade/rpc/api_server/webserver.py | 1 + freqtrade/rpc/api_server/ws/channel.py | 89 +++++++++++++++---- freqtrade/rpc/api_server/ws/message_stream.py | 3 +- 4 files changed, 88 insertions(+), 71 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 01243b0cc..2454646ea 100644 
--- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -1,16 +1,14 @@ -import asyncio import logging from typing import Any, Dict from fastapi import APIRouter, Depends -from fastapi.websockets import WebSocket, WebSocketDisconnect +from fastapi.websockets import WebSocket from pydantic import ValidationError -from websockets.exceptions import ConnectionClosed from freqtrade.enums import RPCMessageType, RPCRequestType from freqtrade.rpc.api_server.api_auth import validate_ws_token from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc -from freqtrade.rpc.api_server.ws import WebSocketChannel +from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel from freqtrade.rpc.api_server.ws.message_stream import MessageStream from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema, WSRequestSchema, WSWhitelistMessage) @@ -23,45 +21,20 @@ logger = logging.getLogger(__name__) router = APIRouter() -class WebSocketChannelClosed(Exception): - """ - General WebSocket exception to signal closing the channel - """ - pass - - async def channel_reader(channel: WebSocketChannel, rpc: RPC): """ Iterate over the messages from the channel and process the request """ - try: - async for message in channel: - await _process_consumer_request(message, channel, rpc) - except ( - RuntimeError, - WebSocketDisconnect, - ConnectionClosed - ): - raise WebSocketChannelClosed - except asyncio.CancelledError: - return + async for message in channel: + await _process_consumer_request(message, channel, rpc) async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream): """ Iterate over messages in the message stream and send them """ - try: - async for message in message_stream: - await channel.send(message) - except ( - RuntimeError, - WebSocketDisconnect, - ConnectionClosed - ): - raise WebSocketChannelClosed - except asyncio.CancelledError: - return + async for message in message_stream: + await channel.send(message) async def _process_consumer_request( @@ -103,15 +76,11 @@ async def _process_consumer_request( # Format response response = WSWhitelistMessage(data=whitelist) - # Send it back await channel.send(response.dict(exclude_none=True)) elif type == RPCRequestType.ANALYZED_DF: - limit = None - - if data: - # Limit the amount of candles per dataframe to 'limit' or 1500 - limit = max(data.get('limit', 1500), 1500) + # Limit the amount of candles per dataframe to 'limit' or 1500 + limit = min(data.get('limit', 1500), 1500) if data else None # For every pair in the generator, send a separate message for message in rpc._ws_request_analyzed_df(limit): @@ -127,17 +96,8 @@ async def message_endpoint( rpc: RPC = Depends(get_rpc), message_stream: MessageStream = Depends(get_message_stream) ): - async with WebSocketChannel(websocket).connect() as channel: - try: - logger.info(f"Channel connected - {channel}") - - channel_tasks = asyncio.gather( - channel_reader(channel, rpc), - channel_broadcaster(channel, message_stream) - ) - await channel_tasks - except WebSocketChannelClosed: - pass - finally: - logger.info(f"Channel disconnected - {channel}") - channel_tasks.cancel() + async with create_channel(websocket) as channel: + await channel.run_channel_tasks( + channel_reader(channel, rpc), + channel_broadcaster(channel, message_stream) + ) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index f100a46ef..4a9f089d1 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ 
b/freqtrade/rpc/api_server/webserver.py @@ -94,6 +94,7 @@ class ApiServer(RPCHandler): del ApiServer._rpc if self._server and not self._standalone: logger.info("Stopping API Server") + # self._server.force_exit, self._server.should_exit = True, True self._server.cleanup() @classmethod diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 8e248d368..d4d4d6453 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -29,6 +29,7 @@ class WebSocketChannel: # Internal event to signify a closed websocket self._closed = asyncio.Event() + self._send_timeout_high_limit = 2 # The subscribed message types self._subscriptions: List[str] = [] @@ -36,6 +37,9 @@ class WebSocketChannel: # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) + # The async tasks created for the channel + self._channel_tasks: List[asyncio.Task] = [] + def __repr__(self): return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" @@ -51,7 +55,14 @@ class WebSocketChannel: """ Send a message on the wrapped websocket """ - await self._wrapped_ws.send(message) + + # Without this sleep, messages would send to one channel + # first then another after the first one finished. + # With the sleep call, it gives control to the event + # loop to schedule other channel send methods. + await asyncio.sleep(0) + + return await self._wrapped_ws.send(message) async def recv(self): """ @@ -77,7 +88,6 @@ class WebSocketChannel: """ self._closed.set() - self._relay_task.cancel() try: await self._websocket.close() @@ -106,23 +116,68 @@ class WebSocketChannel: """ return message_type in self._subscriptions + async def run_channel_tasks(self, *tasks, **kwargs): + """ + Create and await on the channel tasks unless an exception + was raised, then cancel them all. 
+ + :params *tasks: All coros or tasks to be run concurrently + :param **kwargs: Any extra kwargs to pass to gather + """ + + # Wrap the coros into tasks if they aren't already + self._channel_tasks = [ + task if isinstance(task, asyncio.Task) else asyncio.create_task(task) + for task in tasks + ] + + try: + await asyncio.gather(*self._channel_tasks, **kwargs) + except Exception: + # If an exception occurred, cancel the rest of the tasks and bubble up + # the error that was caught here + await self.cancel_channel_tasks() + raise + + async def cancel_channel_tasks(self): + """ + Cancel and wait on all channel tasks + """ + for task in self._channel_tasks: + task.cancel() + + # Wait for tasks to finish cancelling + try: + await asyncio.wait(self._channel_tasks) + except asyncio.CancelledError: + pass + + self._channel_tasks = [] + async def __aiter__(self): """ Generator for received messages """ - while True: - try: - yield await self.recv() - except Exception: - break + # We can not catch any errors here as websocket.recv is + # the first to catch any disconnects and bubble it up + # so the connection is garbage collected right away + while not self.is_closed(): + yield await self.recv() - @asynccontextmanager - async def connect(self): - """ - Context manager for safely opening and closing the websocket connection - """ - try: - await self.accept() - yield self - finally: - await self.close() + +@asynccontextmanager +async def create_channel(websocket: WebSocketType, **kwargs): + """ + Context manager for safely opening and closing a WebSocketChannel + """ + channel = WebSocketChannel(websocket, **kwargs) + try: + await channel.accept() + logger.info(f"Connected to channel - {channel}") + + yield channel + except Exception: + pass + finally: + await channel.close() + logger.info(f"Disconnected from channel - {channel}") diff --git a/freqtrade/rpc/api_server/ws/message_stream.py b/freqtrade/rpc/api_server/ws/message_stream.py index f77242719..9592908ab 100644 --- a/freqtrade/rpc/api_server/ws/message_stream.py +++ b/freqtrade/rpc/api_server/ws/message_stream.py @@ -17,7 +17,8 @@ class MessageStream: async def subscribe(self): waiter = self._waiter while True: - message, waiter = await waiter + # Shield the future from being cancelled by a task waiting on it + message, waiter = await asyncio.shield(waiter) yield message __aiter__ = subscribe From d02da279f8d76bcbd4042e473a1d8d199355b266 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 19 Nov 2022 13:20:20 +0100 Subject: [PATCH 146/232] document the simplifications of the training environment --- docs/freqai-reinforcement-learning.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index b96c591de..bd2b36463 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -23,6 +23,11 @@ We envision the majority of users focusing their effort on creative design of th The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. +### Important considerations + +As explained above, the agent is "trained" in an artificial trading "environment". 
In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. + + ## Running Reinforcement Learning Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: From c1a73a551225424591891c8bb15491de85a79a36 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sat, 19 Nov 2022 13:21:26 -0700 Subject: [PATCH 147/232] move sleep call in send, minor cleanup --- freqtrade/rpc/api_server/ws/channel.py | 20 +++++++++----------- freqtrade/rpc/api_server/ws/serializer.py | 1 - 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index d4d4d6453..7a1191d62 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -55,14 +55,16 @@ class WebSocketChannel: """ Send a message on the wrapped websocket """ + await self._wrapped_ws.send(message) # Without this sleep, messages would send to one channel - # first then another after the first one finished. + # first then another after the first one finished and prevent + # any normal Rest API calls from processing at the same time. # With the sleep call, it gives control to the event - # loop to schedule other channel send methods. - await asyncio.sleep(0) - - return await self._wrapped_ws.send(message) + # loop to schedule other channel send methods, and helps + # throttle how fast we send. 
+ # 0.01 = 100 messages/second max throughput + await asyncio.sleep(0.01) async def recv(self): """ @@ -132,12 +134,10 @@ class WebSocketChannel: ] try: - await asyncio.gather(*self._channel_tasks, **kwargs) + return await asyncio.gather(*self._channel_tasks, **kwargs) except Exception: - # If an exception occurred, cancel the rest of the tasks and bubble up - # the error that was caught here + # If an exception occurred, cancel the rest of the tasks await self.cancel_channel_tasks() - raise async def cancel_channel_tasks(self): """ @@ -176,8 +176,6 @@ async def create_channel(websocket: WebSocketType, **kwargs): logger.info(f"Connected to channel - {channel}") yield channel - except Exception: - pass finally: await channel.close() logger.info(f"Disconnected from channel - {channel}") diff --git a/freqtrade/rpc/api_server/ws/serializer.py b/freqtrade/rpc/api_server/ws/serializer.py index 625a0990c..9a894e1bf 100644 --- a/freqtrade/rpc/api_server/ws/serializer.py +++ b/freqtrade/rpc/api_server/ws/serializer.py @@ -31,7 +31,6 @@ class WebSocketSerializer(ABC): async def recv(self) -> bytes: data = await self._websocket.recv() - return self._deserialize(data) From 3714d7074b91b9f0219e9fbac9c3effed9b4aecd Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sat, 19 Nov 2022 13:29:23 -0700 Subject: [PATCH 148/232] smaller throttle in channel send --- freqtrade/rpc/api_server/ws/channel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 7a1191d62..80b2ec220 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -63,8 +63,8 @@ class WebSocketChannel: # With the sleep call, it gives control to the event # loop to schedule other channel send methods, and helps # throttle how fast we send. 
- # 0.01 = 100 messages/second max throughput - await asyncio.sleep(0.01) + # 0.005 = 200 messages/second max throughput + await asyncio.sleep(0.005) async def recv(self): """ From 60a167bdefac8ba1cdf5224aee00dfdc26145020 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 14:09:45 -0700 Subject: [PATCH 149/232] add dynamic send timeout --- freqtrade/rpc/api_server/api_ws.py | 2 +- freqtrade/rpc/api_server/ws/channel.py | 65 +++++++++++++++++++------- 2 files changed, 50 insertions(+), 17 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 2454646ea..618490ec8 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -34,7 +34,7 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message Iterate over messages in the message stream and send them """ async for message in message_stream: - await channel.send(message) + await channel.send(message, timeout=True) async def _process_consumer_request( diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 80b2ec220..5424d7440 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -1,7 +1,9 @@ import asyncio import logging +import time +from collections import deque from contextlib import asynccontextmanager -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy @@ -29,7 +31,13 @@ class WebSocketChannel: # Internal event to signify a closed websocket self._closed = asyncio.Event() - self._send_timeout_high_limit = 2 + # The async tasks created for the channel + self._channel_tasks: List[asyncio.Task] = [] + + # Deque for average send times + self._send_times: Deque[float] = deque([], maxlen=10) + # High limit defaults to 3 to start + self._send_high_limit = 3 # The subscribed message types self._subscriptions: List[str] = [] @@ -37,9 +45,6 @@ class WebSocketChannel: # Wrap the WebSocket in the Serializing class self._wrapped_ws = serializer_cls(self._websocket) - # The async tasks created for the channel - self._channel_tasks: List[asyncio.Task] = [] - def __repr__(self): return f"WebSocketChannel({self.channel_id}, {self.remote_addr})" @@ -51,20 +56,48 @@ class WebSocketChannel: def remote_addr(self): return self._websocket.remote_addr - async def send(self, message: Union[WSMessageSchemaType, Dict[str, Any]]): + def _calc_send_limit(self): """ - Send a message on the wrapped websocket + Calculate the send high limit for this channel """ - await self._wrapped_ws.send(message) - # Without this sleep, messages would send to one channel - # first then another after the first one finished and prevent - # any normal Rest API calls from processing at the same time. - # With the sleep call, it gives control to the event - # loop to schedule other channel send methods, and helps - # throttle how fast we send. - # 0.005 = 200 messages/second max throughput - await asyncio.sleep(0.005) + # Only update if we have enough data + if len(self._send_times) == self._send_times.maxlen: + # At least 1s or twice the average of send times + self._send_high_limit = max( + (sum(self._send_times) / len(self._send_times)) * 2, + 1 + ) + + async def send( + self, + message: Union[WSMessageSchemaType, Dict[str, Any]], + timeout: bool = False + ): + """ + Send a message on the wrapped websocket. 
If the sending + takes too long, it will raise a TimeoutError and + disconnect the connection. + + :param message: The message to send + :param timeout: Enforce send high limit, defaults to False + """ + try: + _ = time.time() + # If the send times out, it will raise + # a TimeoutError and bubble up to the + # message_endpoint to close the connection + await asyncio.wait_for( + self._wrapped_ws.send(message), + timeout=self._send_high_limit if timeout else None + ) + total_time = time.time() - _ + self._send_times.append(total_time) + + self._calc_send_limit() + except asyncio.TimeoutError: + logger.info(f"Connection for {self} is too far behind, disconnecting") + raise async def recv(self): """ From 48a1f2418ffb89c148e3417f65545ec7248a6faf Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 16:18:24 -0700 Subject: [PATCH 150/232] update typing, remove unneeded try block, readd sleep --- freqtrade/rpc/api_server/ws/channel.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 5424d7440..4bd7b0e4b 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -3,7 +3,7 @@ import logging import time from collections import deque from contextlib import asynccontextmanager -from typing import Any, Deque, Dict, List, Optional, Type, Union +from typing import Any, AsyncGenerator, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy @@ -99,6 +99,15 @@ class WebSocketChannel: logger.info(f"Connection for {self} is too far behind, disconnecting") raise + # Without this sleep, messages would send to one channel + # first then another after the first one finished and prevent + # any normal Rest API calls from processing at the same time. + # With the sleep call, it gives control to the event + # loop to schedule other channel send methods, and helps + # throttle how fast we send. 
+ # 0.01 = 100 messages/second max throughput + await asyncio.sleep(0.01) + async def recv(self): """ Receive a message on the wrapped websocket @@ -180,10 +189,7 @@ class WebSocketChannel: task.cancel() # Wait for tasks to finish cancelling - try: - await asyncio.wait(self._channel_tasks) - except asyncio.CancelledError: - pass + await asyncio.wait(self._channel_tasks) self._channel_tasks = [] @@ -199,7 +205,10 @@ class WebSocketChannel: @asynccontextmanager -async def create_channel(websocket: WebSocketType, **kwargs): +async def create_channel( + websocket: WebSocketType, + **kwargs +) -> AsyncGenerator[WebSocketChannel, None]: """ Context manager for safely opening and closing a WebSocketChannel """ From d2870d48ea8e7d19782f6a2c753ea622c16d36ae Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 16:24:44 -0700 Subject: [PATCH 151/232] change typing to async iterator --- freqtrade/rpc/api_server/ws/channel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 4bd7b0e4b..8699de66c 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -3,7 +3,7 @@ import logging import time from collections import deque from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Deque, Dict, List, Optional, Type, Union +from typing import Any, AsyncIterator, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy @@ -208,7 +208,7 @@ class WebSocketChannel: async def create_channel( websocket: WebSocketType, **kwargs -) -> AsyncGenerator[WebSocketChannel, None]: +) -> AsyncIterator[WebSocketChannel]: """ Context manager for safely opening and closing a WebSocketChannel """ From 106ac2ab4d76ab32ac25e8b77e429d7b22779bbc Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 16:36:22 -0700 Subject: [PATCH 152/232] fix tests, change to get call --- freqtrade/rpc/external_message_consumer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index d9aed7d52..6078efd07 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -181,7 +181,7 @@ class ExternalMessageConsumer: host, port = producer['host'], producer['port'] token = producer['ws_token'] name = producer['name'] - scheme = 'wss' if producer['secure'] else 'ws' + scheme = 'wss' if producer.get('secure', False) else 'ws' ws_url = f"{scheme}://{host}:{port}/api/v1/message/ws?token={token}" # This will raise InvalidURI if the url is bad From edb817e2e6254b9156b00e89e8144ba54f1c9644 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sun, 20 Nov 2022 19:19:28 -0700 Subject: [PATCH 153/232] add tutorial for ssl in docs --- docs/rest-api.md | 169 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) diff --git a/docs/rest-api.md b/docs/rest-api.md index c7d762648..2087d9a94 100644 --- a/docs/rest-api.md +++ b/docs/rest-api.md @@ -458,3 +458,172 @@ The correct configuration for this case is `http://localhost:8080` - the main pa !!! Note We strongly recommend to also set `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot. + + +### Using SSL/TLS + +SSL/TLS is used to provide security and encrypt network traffic. 
Freqtrade does not directly support SSL, but you can easily accomplish this with a reverse proxy such as Nginx or Traefik. Below are some steps to help you get started on setting one up for your bot. For the sake of simplicity, we will use a native installation of Nginx and certbot. + + +**Prerequisites** + +Before starting this tutorial, you will need a few things. + +- A Freqtrade bot set up and running +- A registered domain name, e.g. myftbot.com +- A DNS A record for the top level domain pointing to your server's public IP + +**Step 1: Installing Nginx and Certbot** + +Once you have all of the prerequisites, the first step is to get Nginx installed on your system. This tutorial assumes the use of Ubuntu 20.04, though you can find your linux distro's package commands via a search engine. First, update your local package index so that you have access to the most recent package listings then install Nginx: + +``` bash +> sudo apt update +> sudo apt install nginx +``` + +After accepting the installation, Nginx and any dependencies will be installed to your system and automatically started. You can check it is running with `systemd`: + +``` bash +> sudo systemctl status nginx + +● nginx.service - A high performance web server and a reverse proxy server + Loaded: loaded (/lib/systemd/system/nginx.service; enabled; vendor preset: enabled) + Active: active (running) since Wed 2022-11-16 12:09:27 MST; 4 days ago + Docs: man:nginx(8) + Process: 1026 ExecStartPre=/usr/sbin/nginx -t -q -g daemon on; master_process on; (code=exited, status=0/SUCCESS) + Process: 1106 ExecStart=/usr/sbin/nginx -g daemon on; master_process on; (code=exited, status=0/SUCCESS) + Main PID: 1107 (nginx) + Tasks: 5 (limit: 6929) + Memory: 5.7M + CGroup: /system.slice/nginx.service + ├─1107 nginx: master process /usr/sbin/nginx -g daemon on; master_process on; + ├─1108 nginx: worker process + ├─1109 nginx: worker process + ├─1110 nginx: worker process + └─1111 nginx: worker process +``` + +Next you need to install certbot which will handle all of the certificate automation for your web server and domain. To install certbot it is required to have `snapd` on Ubuntu. If you haven't installed it yet, please review the instructions on [snapcraft's site for installation](https://snapcraft.io/docs/installing-snapd/). + +Once you are good to go, ensure your snapd version is up to date: + +``` bash +> sudo snap install core; sudo snap refresh core +``` + +If you have any Certbot packages installed via your package manager, you should remove them before installing Certbot: + +``` bash +> sudo apt remove certbot +``` + +Finally, install Certbot and prepare the Certbot command. + +``` bash +> sudo snap install --classic certbot +> sudo ln -s /snap/bin/certbot /usr/bin/certbot +``` + +**Step 2: Adjust the firewall** + +The next step is to allow HTTP and HTTPS traffic through your firewall. This is different for each depending on which firewall you use, and how you have it configured. In this example, we are using `ufw`. 
+ +We'll start by enabling `ufw` if it isn't already: + +``` bash +> sudo ufw enable +``` + +You can list the application configurations that ufw knows how to work with + +``` bash +> sudo ufw app list + +Available applications: + CUPS + Nginx Full + Nginx HTTP + Nginx HTTPS +``` + +As you can see in the output, there are 3 profiles available for Nginx: + +- **Nginx Full**: This profile opens both port 80 (normal web traffic) and port 443 (SSL/TLS traffic) +- **Nginx HTTP**: This profile only opens port 80 (normal web traffic) +- **Nginx HTTPS**: This profile only opens port 443 (SSL/TLS traffic) + +We will configure the firewall to allow both port 80 and 443: + +``` bash +> sudo ufw allow 'Nginx Full' +``` + +You can verify the change by typing: + +``` bash +> sudo ufw status + +Status: active + +To Action From +-- ------ ---- +Nginx HTTPS ALLOW Anywhere +Nginx Full ALLOW Anywhere +Nginx HTTPS (v6) ALLOW Anywhere (v6) +Nginx Full (v6) ALLOW Anywhere (v6) +``` + +**Step 3: Configuring Nginx** + +Using your favorite editor, edit the default nginx configuration. In our case, it'll be under `/etc/nginx/conf.d/default.conf`: +``` bash +> sudo vim /etc/nginx/conf.d/default.conf +``` + +Add a section to your configuration like this: + +``` +server { + server_name myftbot.com; + location / { + proxy_pass http://localhost:8080; + } +} +``` + +Make sure to change `localhost` and `8080` to what you have set in your `api_server` configuration for your bot. + +Verify your nginx config file syntax and make sure there are no errors: +``` bash +> sudo nginx -t +``` + +Finally you can reload nginx to get the new configuration changes: + +``` bash +> sudo systemctl reload nginx +``` + +!!! Note + The `reload` command forces Nginx to read the new configuration without interrupting any connections. The `restart` command restarts the whole nginx service. + +**Step 4: Getting the certificates** + +Certbot already comes with an easy way to setup Nginx with SSL/TLS th automatically changes your configuration file with the required fields: + +``` bash +> sudo certbot --nginx +``` + +You will be prompted for some information such as your email (To receive updates about your certificates), the domain you pointed to the server, and agree to the TOS and optional newsletter. You can also set to redirect HTTP traffic to HTTPS, removing HTTP access. + +You can now test your SSL setup by using curl to make a request to your bot's Rest API: + +``` bash +> curl https://myftbot.com/api/v1/ping + +{'status': 'pong'} +``` + +If you see a pong response then everything is working and you have successfully set up SSL/TLS termination for your bot. 
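
The closing `curl` check above can also be scripted, which is convenient for recurring monitoring. The snippet below is a minimal sketch rather than part of the patch: the domain is the placeholder used in the tutorial, and the third-party `requests` package is assumed to be installed.

```python
import requests

# Placeholder domain from the tutorial above - replace with your own.
BOT_URL = "https://myftbot.com"


def check_ssl_ping(base_url: str = BOT_URL) -> bool:
    """Return True if the bot answers /api/v1/ping over a valid TLS connection."""
    # verify=True is the default, so requests validates the certbot-issued certificate.
    response = requests.get(f"{base_url}/api/v1/ping", timeout=10)
    response.raise_for_status()
    return response.json().get("status") == "pong"


if __name__ == "__main__":
    print("SSL/TLS setup OK" if check_ssl_ping() else "Unexpected response")
```
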
From d9d7df70bfcbc2094ed51518438b238254d193f6 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 21 Nov 2022 12:21:40 -0700 Subject: [PATCH 154/232] fix tests, log unknown errors --- freqtrade/rpc/api_server/webserver.py | 1 - freqtrade/rpc/api_server/ws/channel.py | 14 ++++++++++- tests/rpc/test_rpc_apiserver.py | 34 ++++++++++++-------------- 3 files changed, 29 insertions(+), 20 deletions(-) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index 4a9f089d1..e4eb3895d 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -212,7 +212,6 @@ class ApiServer(RPCHandler): if self._standalone: self._server.run() else: - # self.start_message_queue() self._server.run_in_thread() except Exception: logger.exception("Api server failed to start.") diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 8699de66c..9dea21f3b 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -6,6 +6,9 @@ from contextlib import asynccontextmanager from typing import Any, AsyncIterator, Deque, Dict, List, Optional, Type, Union from uuid import uuid4 +from fastapi import WebSocketDisconnect +from websockets.exceptions import ConnectionClosed + from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy from freqtrade.rpc.api_server.ws.serializer import (HybridJSONWebSocketSerializer, WebSocketSerializer) @@ -189,7 +192,16 @@ class WebSocketChannel: task.cancel() # Wait for tasks to finish cancelling - await asyncio.wait(self._channel_tasks) + try: + await task + except ( + asyncio.CancelledError, + WebSocketDisconnect, + ConnectionClosed + ): + pass + except Exception as e: + logger.info(f"Encountered unknown exception: {e}", exc_info=e) self._channel_tasks = [] diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 969728b6f..25d6a32e3 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -57,7 +57,10 @@ def botclient(default_conf, mocker): try: apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(rpc) - yield ftbot, TestClient(apiserver.app) + # We need to use the TestClient as a context manager to + # handle lifespan events correctly + with TestClient(apiserver.app) as client: + yield ftbot, client # Cleanup ... ? 
finally: if apiserver: @@ -438,7 +441,6 @@ def test_api_cleanup(default_conf, mocker, caplog): apiserver.cleanup() assert apiserver._server.cleanup.call_count == 1 assert log_has("Stopping API Server", caplog) - assert log_has("Stopping API Server background tasks", caplog) ApiServer.shutdown() @@ -1714,12 +1716,14 @@ def test_api_ws_subscribe(botclient, mocker): with client.websocket_connect(ws_url) as ws: ws.send_json({'type': 'subscribe', 'data': ['whitelist']}) + time.sleep(1) # Check call count is now 1 as we sent a valid subscribe request assert sub_mock.call_count == 1 with client.websocket_connect(ws_url) as ws: ws.send_json({'type': 'subscribe', 'data': 'whitelist'}) + time.sleep(1) # Call count hasn't changed as the subscribe request was invalid assert sub_mock.call_count == 1 @@ -1773,24 +1777,18 @@ def test_api_ws_send_msg(default_conf, mocker, caplog): mocker.patch('freqtrade.rpc.api_server.ApiServer.start_api') apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(RPC(get_patched_freqtradebot(mocker, default_conf))) - apiserver.start_message_queue() - # Give the queue thread time to start - time.sleep(0.2) - # Test message_queue coro receives the message - test_message = {"type": "status", "data": "test"} - apiserver.send_msg(test_message) - time.sleep(0.1) # Not sure how else to wait for the coro to receive the data - assert log_has("Found message of type: status", caplog) + # Start test client context manager to run lifespan events + with TestClient(apiserver.app): + # Test message is published on the Message Stream + test_message = {"type": "status", "data": "test"} + first_waiter = apiserver._message_stream._waiter + apiserver.send_msg(test_message) + assert first_waiter.result()[0] == test_message - # Test if exception logged when error occurs in sending - mocker.patch('freqtrade.rpc.api_server.ws.channel.ChannelManager.broadcast', - side_effect=Exception) - - apiserver.send_msg(test_message) - time.sleep(0.1) # Not sure how else to wait for the coro to receive the data - assert log_has_re(r"Exception happened in background task.*", caplog) + second_waiter = apiserver._message_stream._waiter + apiserver.send_msg(test_message) + assert first_waiter != second_waiter finally: - apiserver.cleanup() ApiServer.shutdown() From 86ff711525ba13cd88673686f90eed994e18e8b9 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Mon, 21 Nov 2022 12:52:18 -0700 Subject: [PATCH 155/232] update docs on reverse proxy --- docs/rest-api.md | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/docs/rest-api.md b/docs/rest-api.md index 2087d9a94..beb9ac194 100644 --- a/docs/rest-api.md +++ b/docs/rest-api.md @@ -389,6 +389,23 @@ Now anytime those types of RPC messages are sent in the bot, you will receive th } ``` +#### Reverse Proxy and Websockets + +There are some quirks when using a reverse proxy with the message websocket endpoint. The message websocket endpoint keeps a long-running connection open between the Rest API and the client. It's built on top of HTTP and uses the HTTP Upgrade mechanism to change from HTTP to WebSockets during connection. There are some challenges that a reverse proxy faces when supporting WebSockets, such as WebSockets are a hop-by-hop protocol, so when a proxy intercepts an Upgrade request from the client it needs to send it's own Upgrade request to the server, including appropriate headers. Also, since these connections are long lived, the proxy needs to allow these connections to remain open. 
+ +When using Nginx, the following configuration is required for WebSockets to work: +``` +proxy_http_version 1.1; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection $connection_upgrade; +proxy_set_header Host $host; +``` + +To configure your reverse proxy, see it's documentation for proxying websockets. + +- **Traefik**: Traefik supports websockets out of the box, see the [documentation](https://doc.traefik.io/traefik/) +- **Caddy**: Caddy v2 supports websockets out of the box, see the [documentation](https://caddyserver.com/docs/v2-upgrade#proxy) + ### OpenAPI interface To enable the builtin openAPI interface (Swagger UI), specify `"enable_openapi": true` in the api_server configuration. @@ -459,7 +476,7 @@ The correct configuration for this case is `http://localhost:8080` - the main pa !!! Note We strongly recommend to also set `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot. - + From fff745fd83e63c5816801a000ac01ec279b6038e Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 22 Nov 2022 07:17:57 -0700 Subject: [PATCH 156/232] add map to nginx config --- docs/rest-api.md | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/docs/rest-api.md b/docs/rest-api.md index beb9ac194..c7c41d571 100644 --- a/docs/rest-api.md +++ b/docs/rest-api.md @@ -393,12 +393,27 @@ Now anytime those types of RPC messages are sent in the bot, you will receive th There are some quirks when using a reverse proxy with the message websocket endpoint. The message websocket endpoint keeps a long-running connection open between the Rest API and the client. It's built on top of HTTP and uses the HTTP Upgrade mechanism to change from HTTP to WebSockets during connection. There are some challenges that a reverse proxy faces when supporting WebSockets, such as WebSockets are a hop-by-hop protocol, so when a proxy intercepts an Upgrade request from the client it needs to send it's own Upgrade request to the server, including appropriate headers. Also, since these connections are long lived, the proxy needs to allow these connections to remain open. -When using Nginx, the following configuration is required for WebSockets to work: +When using Nginx, the following configuration is required for WebSockets to work (Note this configuration isn't complete, it's missing some information and can not be used as is): ``` -proxy_http_version 1.1; -proxy_set_header Upgrade $http_upgrade; -proxy_set_header Connection $connection_upgrade; -proxy_set_header Host $host; +http { + map $http_upgrade $connection_upgrade { + default upgrade; + '' close; + } + + ... + + server { + ... + + location / { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $host; + } + } +} ``` To configure your reverse proxy, see it's documentation for proxying websockets. 
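
For an end-to-end test of the proxied websocket endpoint, a small client can be handy. The sketch below is not part of the patches above: host, port and token are placeholders, and the third-party `websockets` package is assumed. The URL format matches the one built by `ExternalMessageConsumer` earlier in this series, and the `subscribe` payload mirrors the one used in the API tests.

```python
import asyncio
import json

import websockets  # third-party dependency, assumed to be installed


async def consume(host: str = "127.0.0.1", port: int = 8080, token: str = "secret-ws-token"):
    # Same URL format as ExternalMessageConsumer builds; switch to "wss://" and the
    # proxy's domain when going through the TLS-terminating reverse proxy described above.
    ws_url = f"ws://{host}:{port}/api/v1/message/ws?token={token}"
    async with websockets.connect(ws_url) as ws:
        # Only subscribe to the message types we want broadcast to this channel.
        await ws.send(json.dumps({"type": "subscribe", "data": ["whitelist"]}))
        while True:
            message = json.loads(await ws.recv())
            print(f"Received {message.get('type')} message")


if __name__ == "__main__":
    asyncio.run(consume())
```
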
From a5442772fc22138dc18fcd3c99c2727f1e9007dd Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 22 Nov 2022 09:42:09 -0700 Subject: [PATCH 157/232] ensure only broadcasting to subscribed topics --- freqtrade/rpc/api_server/api_ws.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 618490ec8..fe2968c05 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -34,7 +34,8 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message Iterate over messages in the message stream and send them """ async for message in message_stream: - await channel.send(message, timeout=True) + if channel.subscribed_to(message.get('type')): + await channel.send(message, timeout=True) async def _process_consumer_request( From bd05f85c26bc65228af6b3942bbd1b2f3955eb3e Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 22 Nov 2022 18:11:18 +0100 Subject: [PATCH 158/232] Simplify ssl documentation --- docs/rest-api.md | 187 +++-------------------------------------------- 1 file changed, 12 insertions(+), 175 deletions(-) diff --git a/docs/rest-api.md b/docs/rest-api.md index c7c41d571..62ad586dd 100644 --- a/docs/rest-api.md +++ b/docs/rest-api.md @@ -389,11 +389,12 @@ Now anytime those types of RPC messages are sent in the bot, you will receive th } ``` -#### Reverse Proxy and Websockets +#### Reverse Proxy setup -There are some quirks when using a reverse proxy with the message websocket endpoint. The message websocket endpoint keeps a long-running connection open between the Rest API and the client. It's built on top of HTTP and uses the HTTP Upgrade mechanism to change from HTTP to WebSockets during connection. There are some challenges that a reverse proxy faces when supporting WebSockets, such as WebSockets are a hop-by-hop protocol, so when a proxy intercepts an Upgrade request from the client it needs to send it's own Upgrade request to the server, including appropriate headers. Also, since these connections are long lived, the proxy needs to allow these connections to remain open. +When using [Nginx](https://nginx.org/en/docs/), the following configuration is required for WebSockets to work (Note this configuration is incomplete, it's missing some information and can not be used as is): + +Please make sure to replace `` (and the subsequent port) with the IP and Port matching your configuration/setup. -When using Nginx, the following configuration is required for WebSockets to work (Note this configuration isn't complete, it's missing some information and can not be used as is): ``` http { map $http_upgrade $connection_upgrade { @@ -401,13 +402,14 @@ http { '' close; } - ... + #... server { - ... + #... location / { proxy_http_version 1.1; + proxy_pass http://:8080; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; proxy_set_header Host $host; @@ -416,11 +418,15 @@ http { } ``` -To configure your reverse proxy, see it's documentation for proxying websockets. +To properly configure your reverse proxy (securely), please consult it's documentation for proxying websockets. - **Traefik**: Traefik supports websockets out of the box, see the [documentation](https://doc.traefik.io/traefik/) - **Caddy**: Caddy v2 supports websockets out of the box, see the [documentation](https://caddyserver.com/docs/v2-upgrade#proxy) +!!! 
Tip "SSL certificates" + You can use tools like certbot to setup ssl certificates to access your bot's UI through encrypted connection by using any fo the above reverse proxies. + While this will protect your data in transit, we do not recommend to run the freqtrade API outside of your private network (VPN, SSH tunnel). + ### OpenAPI interface To enable the builtin openAPI interface (Swagger UI), specify `"enable_openapi": true` in the api_server configuration. @@ -490,172 +496,3 @@ The correct configuration for this case is `http://localhost:8080` - the main pa !!! Note We strongly recommend to also set `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot. - - From 48242ca02b0f819d0d0318e89ad2b1804017b076 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 22 Nov 2022 12:43:45 -0700 Subject: [PATCH 159/232] update catch block in cancel channel tasks --- freqtrade/rpc/api_server/ws/channel.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 9dea21f3b..ad183ce5b 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -99,7 +99,7 @@ class WebSocketChannel: self._calc_send_limit() except asyncio.TimeoutError: - logger.info(f"Connection for {self} is too far behind, disconnecting") + logger.info(f"Connection for {self} timed out, disconnecting") raise # Without this sleep, messages would send to one channel @@ -138,7 +138,7 @@ class WebSocketChannel: try: await self._websocket.close() - except Exception: + except RuntimeError: pass def is_closed(self) -> bool: @@ -196,8 +196,10 @@ class WebSocketChannel: await task except ( asyncio.CancelledError, + asyncio.TimeoutError, WebSocketDisconnect, - ConnectionClosed + ConnectionClosed, + RuntimeError ): pass except Exception as e: From 335de760edd40b0d013698fca75724c1eb1a1f9f Mon Sep 17 00:00:00 2001 From: Emre Date: Wed, 23 Nov 2022 18:34:50 +0300 Subject: [PATCH 160/232] Enable --use-pep517 flag for freqai --- setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.sh b/setup.sh index 1a4a285a3..fceab0074 100755 --- a/setup.sh +++ b/setup.sh @@ -82,7 +82,7 @@ function updateenv() { dev=$REPLY if [[ $REPLY =~ ^[Yy]$ ]] then - REQUIREMENTS_FREQAI="-r requirements-freqai.txt" + REQUIREMENTS_FREQAI="-r requirements-freqai.txt --use-pep517" fi ${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI} From c963fd720b01e111f46e1c7fb136bcb9e5621f75 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 23 Nov 2022 18:17:10 +0100 Subject: [PATCH 161/232] Slightly change test setup for dry_run_order_fill --- tests/exchange/test_exchange.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/exchange/test_exchange.py b/tests/exchange/test_exchange.py index a719496e5..e61ad8532 100644 --- a/tests/exchange/test_exchange.py +++ b/tests/exchange/test_exchange.py @@ -1207,12 +1207,17 @@ def test_create_dry_run_order_fees( assert order1['fee']['rate'] == fee -@pytest.mark.parametrize("side,startprice,endprice", [ - ("buy", 25.563, 25.566), - ("sell", 25.566, 25.563) +@pytest.mark.parametrize("side,price,filled", [ + # order_book_l2_usd spread: + # best ask: 25.566 + # best bid: 25.563 + ("buy", 25.563, False), + ("buy", 25.566, True), + ("sell", 25.566, False), + ("sell", 25.563, True), ]) 
@pytest.mark.parametrize("exchange_name", EXCHANGES) -def test_create_dry_run_order_limit_fill(default_conf, mocker, side, startprice, endprice, +def test_create_dry_run_order_limit_fill(default_conf, mocker, side, price, filled, exchange_name, order_book_l2_usd): default_conf['dry_run'] = True exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) @@ -1226,7 +1231,7 @@ def test_create_dry_run_order_limit_fill(default_conf, mocker, side, startprice, ordertype='limit', side=side, amount=1, - rate=startprice, + rate=price, leverage=1.0 ) assert order_book_l2_usd.call_count == 1 @@ -1235,22 +1240,17 @@ def test_create_dry_run_order_limit_fill(default_conf, mocker, side, startprice, assert order["side"] == side assert order["type"] == "limit" assert order["symbol"] == "LTC/USDT" + assert order["average"] == price + assert order['status'] == 'open' if not filled else 'closed' order_book_l2_usd.reset_mock() + # fetch order again... order_closed = exchange.fetch_dry_run_order(order['id']) - assert order_book_l2_usd.call_count == 1 - assert order_closed['status'] == 'open' - assert not order['fee'] - assert order_closed['filled'] == 0 + assert order_book_l2_usd.call_count == (1 if not filled else 0) + assert order_closed['status'] == ('open' if not filled else 'closed') + assert order_closed['filled'] == (0 if not filled else 1) order_book_l2_usd.reset_mock() - order_closed['price'] = endprice - - order_closed = exchange.fetch_dry_run_order(order['id']) - assert order_closed['status'] == 'closed' - assert order['fee'] - assert order_closed['filled'] == 1 - assert order_closed['filled'] == order_closed['amount'] # Empty orderbook test mocker.patch('freqtrade.exchange.Exchange.fetch_l2_order_book', From 3d26659d5ef520a6320532e767608cbdfbc1563c Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 23 Nov 2022 20:09:55 +0100 Subject: [PATCH 162/232] Fix some doc typos --- docs/freqai-reinforcement-learning.md | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index bd2b36463..45f29c6ea 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,16 +1,15 @@ # Reinforcement Learning -!!! Note - Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. - +!!! Note "Installation size" + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. ## Background and terminology ### What is RL and why does FreqAI need it? -Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). 
During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. +Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. -A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployements.* +A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.* Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors. @@ -18,16 +17,14 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward). -We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configruation settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. - -The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. 
Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. +We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. +The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. ### Important considerations As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. - ## Running Reinforcement Learning Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line: @@ -87,7 +84,7 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from return df ``` -Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environent: +Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment: ```python # The following features are necessary for RL models @@ -154,7 +151,7 @@ In order to configure the `Reinforcement Learner` the following dictionary must } ``` -Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). +Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. 
`model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). !!! Note Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html). @@ -220,15 +217,14 @@ where `unique-id` is the `identifier` set in the `freqai` configuration file. Th ![tensorboard](assets/tensorboard.jpg) - ### Choosing a base environment -FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: +FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Meanwhile, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include: * the actions available in the `calculate_reward` * the actions consumed by the user strategy -Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. +Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. !!! Note FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). 
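
To make the customization point above concrete, a user prediction model that only overrides `calculate_reward()` could be sketched roughly as follows. This is an outline, not part of the patch series: the imports follow the test model shown later in this series, while the class name, the specific `Actions`/`Positions` members and the `get_unrealized_profit()`/`_position` helpers are assumptions that should be checked against the `BaseEnvironment` shipped with your version.

```python
import logging

from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions

logger = logging.getLogger(__name__)


class MyCustomReinforcementLearner(ReinforcementLearner):
    """Hypothetical user model: only the environment reward is customized."""

    class MyRLEnv(Base4ActionRLEnv):

        def calculate_reward(self, action: int) -> float:
            # Unrealized profit of the simulated position (assumed BaseEnvironment helper).
            pnl = self.get_unrealized_profit()

            # Reward closing a position in proportion to its profit.
            if action == Actions.Exit.value and self._position in (Positions.Long, Positions.Short):
                return float(pnl * 100)

            # Small penalty for sitting idle, so the agent does not learn to do nothing.
            if action == Actions.Neutral.value and self._position == Positions.Neutral:
                return -1.0

            return 0.0
```

Assuming the file is placed where FreqAI discovers prediction models, such a class would then be selected with `--freqaimodel`, in the same way as the built-in `ReinforcementLearner` shown above.
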
From e5fc21f577285f0f168467b4b5ea27765d656313 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 23 Nov 2022 20:59:45 +0100 Subject: [PATCH 163/232] Fix broken table rendering --- docs/freqai-parameter-table.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index be306fd71..9e16aec8f 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -48,7 +48,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `n_estimators` | The number of boosted trees to fit in the training of the model.
**Datatype:** Integer.
 | `learning_rate` | Boosting learning rate during training of the model. <br> **Datatype:** Float.
 | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
-| | *Reinforcement Learning Parameters**
+| | **Reinforcement Learning Parameters**
 | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model. <br> **Datatype:** Dictionary.
 | `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points. <br> **Datatype:** Integer.
 | `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. From 8f1a8c752bdf3dc91f415b4d27931e87a0e6611d Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 24 Nov 2022 07:00:12 +0100 Subject: [PATCH 164/232] Add freqairl docker build process --- build_helpers/publish_docker_arm64.sh | 8 ++++++++ build_helpers/publish_docker_multi.sh | 4 ++++ docker/Dockerfile.freqai_rl | 8 ++++++++ docs/freqai-reinforcement-learning.md | 3 ++- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 5 ++--- 5 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 docker/Dockerfile.freqai_rl diff --git a/build_helpers/publish_docker_arm64.sh b/build_helpers/publish_docker_arm64.sh index 4c66f4483..071eb0fa2 100755 --- a/build_helpers/publish_docker_arm64.sh +++ b/build_helpers/publish_docker_arm64.sh @@ -7,11 +7,13 @@ export DOCKER_BUILDKIT=1 TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g") TAG_PLOT=${TAG}_plot TAG_FREQAI=${TAG}_freqai +TAG_FREQAI_RL=${TAG_FREQAI}rl TAG_PI="${TAG}_pi" TAG_ARM=${TAG}_arm TAG_PLOT_ARM=${TAG_PLOT}_arm TAG_FREQAI_ARM=${TAG_FREQAI}_arm +TAG_FREQAI_RL_ARM=${TAG_FREQAI_RL}_arm CACHE_IMAGE=freqtradeorg/freqtrade_cache echo "Running for ${TAG}" @@ -41,9 +43,11 @@ docker tag freqtrade:$TAG_ARM ${CACHE_IMAGE}:$TAG_ARM docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot . docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai . +docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_RL_ARM} -f docker/Dockerfile.freqai_rl . docker tag freqtrade:$TAG_PLOT_ARM ${CACHE_IMAGE}:$TAG_PLOT_ARM docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM +docker tag freqtrade:$TAG_FREQAI_RL_ARM ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM # Run backtest docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3 @@ -58,6 +62,7 @@ docker images # docker push ${IMAGE_NAME} docker push ${CACHE_IMAGE}:$TAG_PLOT_ARM docker push ${CACHE_IMAGE}:$TAG_FREQAI_ARM +docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM docker push ${CACHE_IMAGE}:$TAG_ARM # Create multi-arch image @@ -74,6 +79,9 @@ docker manifest push -p ${IMAGE_NAME}:${TAG_PLOT} docker manifest create ${IMAGE_NAME}:${TAG_FREQAI} ${CACHE_IMAGE}:${TAG_FREQAI_ARM} ${CACHE_IMAGE}:${TAG_FREQAI} docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI} +docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM} ${CACHE_IMAGE}:${TAG_FREQAI_RL} +docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_RL} + # Tag as latest for develop builds if [ "${TAG}" = "develop" ]; then docker manifest create ${IMAGE_NAME}:latest ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG} diff --git a/build_helpers/publish_docker_multi.sh b/build_helpers/publish_docker_multi.sh index c13732003..a608c1282 100755 --- a/build_helpers/publish_docker_multi.sh +++ b/build_helpers/publish_docker_multi.sh @@ -6,6 +6,7 @@ TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g") TAG_PLOT=${TAG}_plot TAG_FREQAI=${TAG}_freqai +TAG_FREQAI_RL=${TAG_FREQAI}rl TAG_PI="${TAG}_pi" PI_PLATFORM="linux/arm/v7" @@ -51,9 +52,11 @@ docker tag freqtrade:$TAG ${CACHE_IMAGE}:$TAG docker build 
--cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot . docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai . +docker build --cache-from freqtrade:${TAG_FREQAI} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_FREQAI} -t freqtrade:${TAG_FREQAI_RL} -f docker/Dockerfile.freqai_rl . docker tag freqtrade:$TAG_PLOT ${CACHE_IMAGE}:$TAG_PLOT docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI +docker tag freqtrade:$TAG_FREQAI_RL ${CACHE_IMAGE}:$TAG_FREQAI_RL # Run backtest docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3 @@ -68,6 +71,7 @@ docker images docker push ${CACHE_IMAGE} docker push ${CACHE_IMAGE}:$TAG_PLOT docker push ${CACHE_IMAGE}:$TAG_FREQAI +docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL docker push ${CACHE_IMAGE}:$TAG diff --git a/docker/Dockerfile.freqai_rl b/docker/Dockerfile.freqai_rl new file mode 100644 index 000000000..18fb9afa2 --- /dev/null +++ b/docker/Dockerfile.freqai_rl @@ -0,0 +1,8 @@ +ARG sourceimage=freqtradeorg/freqtrade +ARG sourcetag=develop_freqai +FROM ${sourceimage}:${sourcetag} + +# Install dependencies +COPY requirements-freqai.txt requirements-freqai-rl.txt /freqtrade/ + +RUN pip install -r requirements-freqai-rl.txt --user --no-cache-dir diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 45f29c6ea..0e4388cf1 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,7 +1,8 @@ # Reinforcement Learning !!! Note "Installation size" - Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`. + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". + Users who prefer docker should ensure they use the docker image appended with `_freqairl`. ## Background and terminology diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 629633814..16cab4c7d 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -133,8 +133,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): :param data_dictionary: dict = common data dictionary containing train and test features/labels/weights. :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in the - environment during training - or testing + environment during training or testing :param dk: FreqaiDataKitchen = the datakitchen for the current pair """ train_df = data_dictionary["train_features"] @@ -201,7 +200,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: """ Filter the prediction features data and predict with it. 
- :param: unfiltered_dataframe: Full dataframe for the current backtest period. + :param unfiltered_dataframe: Full dataframe for the current backtest period. :return: :pred_df: dataframe containing the predictions :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove From 44b042ba51aa8827dabd07fe296d3a893c71a421 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 17:53:26 +0100 Subject: [PATCH 165/232] remove unused function --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 0d101ee9c..0d7672b2f 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -18,12 +18,6 @@ class Actions(Enum): Short_exit = 4 -def mean_over_std(x): - std = np.std(x, ddof=1) - mean = np.mean(x) - return mean / std if std > 0 else 0 - - class Base5ActionRLEnv(BaseEnvironment): """ Base class for a 5 action environment From 8855e36f577ba6d2769da545b97709bfc8ef95e2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 18:16:33 +0100 Subject: [PATCH 166/232] reduce freqai testing time by reducing retrain frequency and number of features --- tests/freqai/conftest.py | 3 +- tests/freqai/test_freqai_interface.py | 18 ++++---- .../ReinforcementLearner_test_4ac.py | 42 +------------------ tests/strategy/strats/freqai_rl_test_strat.py | 19 --------- 4 files changed, 12 insertions(+), 70 deletions(-) diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 00efad3a7..bee7df27e 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -27,10 +27,9 @@ def freqai_conf(default_conf, tmpdir): "timerange": "20180110-20180115", "freqai": { "enabled": True, - "startup_candles": 10000, "purge_old_models": True, "train_period_days": 2, - "backtest_period_days": 2, + "backtest_period_days": 10, "live_retrain_hours": 0, "expiration_hours": 1, "identifier": "uniqe-id100", diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index b379d05d7..335cce519 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -192,13 +192,13 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): @pytest.mark.parametrize( "model, num_files, strat", [ - ("LightGBMRegressor", 6, "freqai_test_strat"), - ("XGBoostRegressor", 6, "freqai_test_strat"), - ("CatboostRegressor", 6, "freqai_test_strat"), - ("ReinforcementLearner", 7, "freqai_rl_test_strat"), - ("XGBoostClassifier", 6, "freqai_test_classifier"), - ("LightGBMClassifier", 6, "freqai_test_classifier"), - ("CatboostClassifier", 6, "freqai_test_classifier") + ("LightGBMRegressor", 2, "freqai_test_strat"), + ("XGBoostRegressor", 2, "freqai_test_strat"), + ("CatboostRegressor", 2, "freqai_test_strat"), + ("ReinforcementLearner", 3, "freqai_rl_test_strat"), + ("XGBoostClassifier", 2, "freqai_test_classifier"), + ("LightGBMClassifier", 2, "freqai_test_classifier"), + ("CatboostClassifier", 2, "freqai_test_classifier") ], ) def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog): @@ -305,7 +305,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): freqai.start_backtesting(df, metadata, freqai.dk) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] - assert len(model_folders) == 6 + assert len(model_folders) == 2 # without deleting the existing folder structure, re-run @@ -333,7 
+333,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder) prediction_files = [x for x in path.iterdir() if x.is_file()] - assert len(prediction_files) == 5 + assert len(prediction_files) == 1 shutil.rmtree(Path(freqai.dk.full_path)) diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 13e5af02f..1f40d86d1 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -1,57 +1,19 @@ import logging -from pathlib import Path -from typing import Any, Dict import numpy as np -import torch as th -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions -from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel +from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner logger = logging.getLogger(__name__) -class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel): +class ReinforcementLearner_test_4ac(ReinforcementLearner): """ User created Reinforcement Learning Model prediction model. """ - def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): - - train_df = data_dictionary["train_features"] - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=[64, 64]) - - if dk.pair not in self.dd.model_dictionary or not self.continual_learning: - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path( - dk.full_path / "tensorboard" / dk.pair.split('/')[0]), - **self.freqai_info['model_training_parameters'] - ) - else: - logger.info('Continual training activated - starting training from previously ' - 'trained agent.') - model = self.dd.model_dictionary[dk.pair] - model.set_env(self.train_env) - - model.learn( - total_timesteps=int(total_timesteps), - callback=self.eval_callback - ) - - if Path(dk.data_path / "best_model.zip").is_file(): - logger.info('Callback found a best model.') - best_model = self.MODELCLASS.load(dk.data_path / "best_model") - return best_model - - logger.info('Couldnt find best model, using final model instead.') - - return model - class MyRLEnv(Base4ActionRLEnv): """ User can override any function in BaseRLEnv and gym.Env. 
Here the user diff --git a/tests/strategy/strats/freqai_rl_test_strat.py b/tests/strategy/strats/freqai_rl_test_strat.py index 8d507a6da..f32a4adca 100644 --- a/tests/strategy/strats/freqai_rl_test_strat.py +++ b/tests/strategy/strats/freqai_rl_test_strat.py @@ -19,19 +19,6 @@ class freqai_rl_test_strat(IStrategy): minimal_roi = {"0": 0.1, "240": -1} - plot_config = { - "main_plot": {}, - "subplots": { - "prediction": {"prediction": {"color": "blue"}}, - "target_roi": { - "target_roi": {"color": "brown"}, - }, - "do_predict": { - "do_predict": {"color": "brown"}, - }, - }, - } - process_only_new_candles = True stoploss = -0.05 use_exit_signal = True @@ -50,10 +37,7 @@ class freqai_rl_test_strat(IStrategy): t = int(t) informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) - informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t) - # FIXME: add these outside the user strategy? # The following columns are necessary for RL models. informative[f"%-{pair}raw_close"] = informative["close"] informative[f"%-{pair}raw_open"] = informative["open"] @@ -79,9 +63,6 @@ class freqai_rl_test_strat(IStrategy): # function to populate indicators during training). Notice how we ensure not to # add them multiple times if set_generalized_indicators: - df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7 - df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25 - # For RL, there are no direct targets to set. This is filler (neutral) # until the agent sends an action. df["&-action"] = 0 From 3a07749fcc47570259649c1107bec0e2a0bab407 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 18:46:54 +0100 Subject: [PATCH 167/232] fix docstring --- .../freqai/RL/BaseReinforcementLearningModel.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 16cab4c7d..bddac23b3 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -166,14 +166,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): def get_state_info(self, pair: str) -> Tuple[float, float, int]: """ - State info during dry/live/backtesting which is fed back + State info during dry/live (not backtesting) which is fed back into the model. 
- :param: - pair: str = COIN/STAKE to get the environment information for - :returns: - market_side: float = representing short, long, or neutral for + :param pair: str = COIN/STAKE to get the environment information for + :return: + :market_side: float = representing short, long, or neutral for pair - trade_duration: int = the number of candles that the trade has + :current_profit: float = unrealized profit of the current trade + :trade_duration: int = the number of candles that the trade has been open for """ open_trades = Trade.get_trades_proxy(is_open=True) From 00d2a01bf077c0ae140773091553cc74ec0092aa Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 18:57:01 +0100 Subject: [PATCH 168/232] isort --- tests/freqai/test_models/ReinforcementLearner_test_4ac.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 1f40d86d1..9861acfd8 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -2,8 +2,8 @@ import logging import numpy as np -from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner +from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions logger = logging.getLogger(__name__) From 73c458d47b31f59e8a2f841ed650272b53756553 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 19:04:35 +0100 Subject: [PATCH 169/232] use importlib instead of __import___ --- freqtrade/freqai/data_drawer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 96b481074..3b9352efe 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -1,4 +1,5 @@ import collections +import importlib import logging import re import shutil @@ -573,8 +574,8 @@ class FreqaiDataDrawer: from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") elif self.model_type == 'stable_baselines': - mod = __import__('stable_baselines3', fromlist=[ - self.freqai_info['rl_config']['model_type']]) + mod = importlib.import_module( + 'stable_baselines3', self.freqai_info['rl_config']['model_type']) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") From 2e82e6784a1cb5b8ebee3bcca8ddbfcd7782a917 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 24 Nov 2022 19:07:38 +0100 Subject: [PATCH 170/232] move data_provider cleanup to shutdown() --- freqtrade/freqai/freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 19d6b4faa..806dbf6f7 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -168,7 +168,6 @@ class IFreqaiModel(ABC): """ self.model = None self.dk = None - self.data_provider = None def _on_stop(self): """ @@ -185,6 +184,7 @@ class IFreqaiModel(ABC): logger.info("Stopping FreqAI") self._stop_event.set() + self.data_provider = None self._on_stop() logger.info("Waiting on Training iteration") From 101dec461e40c2b8ed15a7075bb4b7dc9099c7b2 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Thu, 24 Nov 2022 11:35:50 -0700 Subject: [PATCH 171/232] close 
ws channel if can't accept --- freqtrade/rpc/api_server/ws/channel.py | 56 ++++++++++++++------------ 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index ad183ce5b..7343bc306 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -125,9 +125,14 @@ class WebSocketChannel: async def accept(self): """ - Accept the underlying websocket connection + Accept the underlying websocket connection, + if the connection has been closed before we can + accept, just close the channel. """ - return await self._websocket.accept() + try: + return await self._websocket.accept() + except RuntimeError: + await self.close() async def close(self): """ @@ -172,17 +177,18 @@ class WebSocketChannel: :param **kwargs: Any extra kwargs to pass to gather """ - # Wrap the coros into tasks if they aren't already - self._channel_tasks = [ - task if isinstance(task, asyncio.Task) else asyncio.create_task(task) - for task in tasks - ] + if not self.is_closed(): + # Wrap the coros into tasks if they aren't already + self._channel_tasks = [ + task if isinstance(task, asyncio.Task) else asyncio.create_task(task) + for task in tasks + ] - try: - return await asyncio.gather(*self._channel_tasks, **kwargs) - except Exception: - # If an exception occurred, cancel the rest of the tasks - await self.cancel_channel_tasks() + try: + return await asyncio.gather(*self._channel_tasks, **kwargs) + except Exception: + # If an exception occurred, cancel the rest of the tasks + await self.cancel_channel_tasks() async def cancel_channel_tasks(self): """ @@ -191,19 +197,19 @@ class WebSocketChannel: for task in self._channel_tasks: task.cancel() - # Wait for tasks to finish cancelling - try: - await task - except ( - asyncio.CancelledError, - asyncio.TimeoutError, - WebSocketDisconnect, - ConnectionClosed, - RuntimeError - ): - pass - except Exception as e: - logger.info(f"Encountered unknown exception: {e}", exc_info=e) + # Wait for tasks to finish cancelling + try: + await task + except ( + asyncio.CancelledError, + asyncio.TimeoutError, + WebSocketDisconnect, + ConnectionClosed, + RuntimeError + ): + pass + except Exception as e: + logger.info(f"Encountered unknown exception: {e}", exc_info=e) self._channel_tasks = [] From fc59b02255e3b91e8329b6bf02517102b05d0996 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Thu, 24 Nov 2022 13:41:10 -0700 Subject: [PATCH 172/232] prevent ws endpoint from running without valid token --- freqtrade/rpc/api_server/api_auth.py | 2 -- freqtrade/rpc/api_server/api_ws.py | 11 ++++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/freqtrade/rpc/api_server/api_auth.py b/freqtrade/rpc/api_server/api_auth.py index ee66fce2b..71f1145a9 100644 --- a/freqtrade/rpc/api_server/api_auth.py +++ b/freqtrade/rpc/api_server/api_auth.py @@ -81,8 +81,6 @@ async def validate_ws_token( except HTTPException: pass - # No checks passed, deny the connection - logger.debug("Denying websocket request.") # If it doesn't match, close the websocket connection await ws.close(code=status.WS_1008_POLICY_VIOLATION) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index fe2968c05..77950923d 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -97,8 +97,9 @@ async def message_endpoint( rpc: RPC = Depends(get_rpc), message_stream: MessageStream = Depends(get_message_stream) ): - async with 
create_channel(websocket) as channel: - await channel.run_channel_tasks( - channel_reader(channel, rpc), - channel_broadcaster(channel, message_stream) - ) + if token: + async with create_channel(websocket) as channel: + await channel.run_channel_tasks( + channel_reader(channel, rpc), + channel_broadcaster(channel, message_stream) + ) From b8d1862ca8b996be341e9ac408c389ebb5a8272c Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 10:42:19 +0100 Subject: [PATCH 173/232] Update cached binance leverage tiers closes #7794 --- .../exchange/binance_leverage_tiers.json | 2582 +++++++++-------- 1 file changed, 1301 insertions(+), 1281 deletions(-) diff --git a/freqtrade/exchange/binance_leverage_tiers.json b/freqtrade/exchange/binance_leverage_tiers.json index cf2fd7287..09bf0a4dc 100644 --- a/freqtrade/exchange/binance_leverage_tiers.json +++ b/freqtrade/exchange/binance_leverage_tiers.json @@ -268,10 +268,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -382,10 +382,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -398,10 +398,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -414,10 +414,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -460,13 +460,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -480,10 +480,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.012, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.012", @@ -774,10 +774,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -790,10 +790,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -1018,10 +1018,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - 
"initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -1034,10 +1034,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -1050,10 +1050,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -1116,10 +1116,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -1378,10 +1378,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.012, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.012", @@ -1394,10 +1394,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -1410,10 +1410,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -1476,10 +1476,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -1492,10 +1492,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -1508,10 +1508,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -1656,10 +1656,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -1770,10 +1770,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -1786,10 +1786,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 
0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -1802,10 +1802,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -1861,6 +1861,202 @@ } } ], + "APT/BUSD": [ + { + "tier": 1.0, + "currency": "BUSD", + "minNotional": 0.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "1", + "initialLeverage": "20", + "notionalCap": "25000", + "notionalFloor": "0", + "maintMarginRatio": "0.025", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "BUSD", + "minNotional": 25000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "2", + "initialLeverage": "10", + "notionalCap": "100000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "625.0" + } + }, + { + "tier": 3.0, + "currency": "BUSD", + "minNotional": 100000.0, + "maxNotional": 250000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "3", + "initialLeverage": "5", + "notionalCap": "250000", + "notionalFloor": "100000", + "maintMarginRatio": "0.1", + "cum": "5625.0" + } + }, + { + "tier": 4.0, + "currency": "BUSD", + "minNotional": 250000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2.0, + "info": { + "bracket": "4", + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "250000", + "maintMarginRatio": "0.125", + "cum": "11875.0" + } + }, + { + "tier": 5.0, + "currency": "BUSD", + "minNotional": 1000000.0, + "maxNotional": 5000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "5", + "initialLeverage": "1", + "notionalCap": "5000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.5", + "cum": "386875.0" + } + } + ], + "APT/USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 150000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 21.0, + "info": { + "bracket": "1", + "initialLeverage": "21", + "notionalCap": "150000", + "notionalFloor": "0", + "maintMarginRatio": "0.02", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 150000.0, + "maxNotional": 250000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "2", + "initialLeverage": "20", + "notionalCap": "250000", + "notionalFloor": "150000", + "maintMarginRatio": "0.025", + "cum": "750.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 250000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "3", + "initialLeverage": "10", + "notionalCap": "1000000", + "notionalFloor": "250000", + "maintMarginRatio": "0.05", + "cum": "7000.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 2000000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "2000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.1", + "cum": "57000.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 2000000.0, + "maxNotional": 5000000.0, + 
"maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "5", + "initialLeverage": "4", + "notionalCap": "5000000", + "notionalFloor": "2000000", + "maintMarginRatio": "0.125", + "cum": "107000.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 5000000.0, + "maxNotional": 10000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "6", + "initialLeverage": "2", + "notionalCap": "10000000", + "notionalFloor": "5000000", + "maintMarginRatio": "0.25", + "cum": "732000.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 10000000.0, + "maxNotional": 11000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "7", + "initialLeverage": "1", + "notionalCap": "11000000", + "notionalFloor": "10000000", + "maintMarginRatio": "0.5", + "cum": "3232000.0" + } + } + ], "AR/USDT": [ { "tier": 1.0, @@ -1966,10 +2162,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -1982,10 +2178,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -1998,10 +2194,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -2064,10 +2260,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -2080,10 +2276,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -2096,10 +2292,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -2342,10 +2538,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -2358,10 +2554,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -2374,10 +2570,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + 
"initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -2522,10 +2718,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -2636,10 +2832,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -2652,10 +2848,10 @@ "minNotional": 50000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "2", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "250000", "notionalFloor": "50000", "maintMarginRatio": "0.02", @@ -2766,10 +2962,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -2782,10 +2978,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -2798,10 +2994,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -2864,10 +3060,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -2960,96 +3156,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", + "initialLeverage": "20", + "notionalCap": "25000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { - "bracket": "3", + "bracket": "2", "initialLeverage": "10", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", - "cum": "700.0" + "cum": "625.0" } }, { - "tier": 4.0, + "tier": 3.0, "currency": "USDT", "minNotional": 100000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { - "bracket": "4", + "bracket": "3", "initialLeverage": "5", 
"notionalCap": "250000", "notionalFloor": "100000", "maintMarginRatio": "0.1", - "cum": "5700.0" + "cum": "5625.0" } }, { - "tier": 5.0, + "tier": 4.0, "currency": "USDT", "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 2.0, "info": { - "bracket": "5", + "bracket": "4", "initialLeverage": "2", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.125", - "cum": "11950.0" + "cum": "11875.0" } }, { - "tier": 6.0, + "tier": 5.0, "currency": "USDT", "minNotional": 1000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", "notionalCap": "5000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "386875.0" } } ], @@ -3060,10 +3240,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -3076,10 +3256,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -3092,10 +3272,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -3158,10 +3338,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -3174,10 +3354,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -3304,10 +3484,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -3320,10 +3500,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -3336,10 +3516,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -3395,17 +3575,17 @@ } } ], - "BLZ/USDT": [ + "BLUEBIRD/USDT": [ { "tier": 1.0, "currency": "USDT", "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": 
"20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -3418,10 +3598,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -3434,10 +3614,108 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", + "initialLeverage": "8", + "notionalCap": "100000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "700.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 100000.0, + "maxNotional": 250000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "250000", + "notionalFloor": "100000", + "maintMarginRatio": "0.1", + "cum": "5700.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 250000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2.0, + "info": { + "bracket": "5", + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "250000", + "maintMarginRatio": "0.125", + "cum": "11950.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 5000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "6", + "initialLeverage": "1", + "notionalCap": "5000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.5", + "cum": "386950.0" + } + } + ], + "BLZ/USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.01, + "maxLeverage": 20.0, + "info": { + "bracket": "1", + "initialLeverage": "20", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.01", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 10.0, + "info": { + "bracket": "2", "initialLeverage": "10", + "notionalCap": "25000", + "notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": "75.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 8.0, + "info": { + "bracket": "3", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -3598,10 +3876,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -3614,10 +3892,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -3744,10 +4022,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -4052,10 +4330,10 @@ "minNotional": 250000.0, 
"maxNotional": 1000000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "3", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.01", @@ -4068,10 +4346,10 @@ "minNotional": 1000000.0, "maxNotional": 10000000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 15.0, "info": { "bracket": "4", - "initialLeverage": "20", + "initialLeverage": "15", "notionalCap": "10000000", "notionalFloor": "1000000", "maintMarginRatio": "0.025", @@ -4162,13 +4440,13 @@ "tier": 10.0, "currency": "USDT", "minNotional": 300000000.0, - "maxNotional": 9.223372036854776e+18, + "maxNotional": 500000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "10", "initialLeverage": "1", - "notionalCap": "9223372036854775807", + "notionalCap": "500000000", "notionalFloor": "300000000", "maintMarginRatio": "0.5", "cum": "1.000163E8" @@ -4182,10 +4460,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -4198,10 +4476,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -4214,10 +4492,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -4260,13 +4538,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 8000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "8000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -4371,120 +4649,6 @@ } } ], - "BTCUSDT_220930": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 375000.0, - "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, - "info": { - "bracket": "1", - "initialLeverage": "25", - "notionalCap": "375000", - "notionalFloor": "0", - "maintMarginRatio": "0.02", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 375000.0, - "maxNotional": 2000000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "2", - "initialLeverage": "10", - "notionalCap": "2000000", - "notionalFloor": "375000", - "maintMarginRatio": "0.05", - "cum": "11250.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 2000000.0, - "maxNotional": 4000000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "3", - "initialLeverage": "5", - "notionalCap": "4000000", - "notionalFloor": "2000000", - "maintMarginRatio": "0.1", - "cum": "111250.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 4000000.0, - "maxNotional": 10000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, - "info": { - "bracket": "4", - "initialLeverage": "4", - "notionalCap": "10000000", - "notionalFloor": "4000000", - 
"maintMarginRatio": "0.125", - "cum": "211250.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 10000000.0, - "maxNotional": 20000000.0, - "maintenanceMarginRate": 0.15, - "maxLeverage": 3.0, - "info": { - "bracket": "5", - "initialLeverage": "3", - "notionalCap": "20000000", - "notionalFloor": "10000000", - "maintMarginRatio": "0.15", - "cum": "461250.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 20000000.0, - "maxNotional": 40000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, - "info": { - "bracket": "6", - "initialLeverage": "2", - "notionalCap": "40000000", - "notionalFloor": "20000000", - "maintMarginRatio": "0.25", - "cum": "2461250.0" - } - }, - { - "tier": 7.0, - "currency": "USDT", - "minNotional": 40000000.0, - "maxNotional": 400000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "7", - "initialLeverage": "1", - "notionalCap": "400000000", - "notionalFloor": "40000000", - "maintMarginRatio": "0.5", - "cum": "1.246125E7" - } - } - ], "BTCUSDT_221230": [ { "tier": 1.0, @@ -4802,10 +4966,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -4818,10 +4982,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -4834,10 +4998,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -4998,10 +5162,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5096,10 +5260,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.012, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.012", @@ -5194,10 +5358,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5292,10 +5456,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5390,10 +5554,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 21.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "21", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5484,13 +5648,13 @@ "tier": 7.0, "currency": "USDT", 
"minNotional": 2000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "2000000", "maintMarginRatio": "0.5", "cum": "654500.0" @@ -5504,10 +5668,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5602,10 +5766,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5618,10 +5782,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -5634,10 +5798,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -5680,13 +5844,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -5700,10 +5864,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5716,10 +5880,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -5732,10 +5896,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -5778,13 +5942,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "2000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -5878,96 +6042,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", - "notionalCap": "5000", + "initialLeverage": "20", + "notionalCap": "25000", "notionalFloor": 
"0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { - "bracket": "3", + "bracket": "2", "initialLeverage": "10", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", - "cum": "700.0" + "cum": "625.0" } }, { - "tier": 4.0, + "tier": 3.0, "currency": "USDT", "minNotional": 100000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { - "bracket": "4", + "bracket": "3", "initialLeverage": "5", "notionalCap": "250000", "notionalFloor": "100000", "maintMarginRatio": "0.1", - "cum": "5700.0" + "cum": "5625.0" } }, { - "tier": 5.0, + "tier": 4.0, "currency": "USDT", "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 2.0, "info": { - "bracket": "5", + "bracket": "4", "initialLeverage": "2", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.125", - "cum": "11950.0" + "cum": "11875.0" } }, { - "tier": 6.0, + "tier": 5.0, "currency": "USDT", "minNotional": 1000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", "notionalCap": "5000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "386875.0" } } ], @@ -5978,10 +6126,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -5994,10 +6142,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -6010,10 +6158,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -6056,13 +6204,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -6076,10 +6224,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -6174,10 +6322,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + 
"initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -6190,10 +6338,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -6206,10 +6354,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -6252,13 +6400,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -6272,10 +6420,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -6370,10 +6518,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -6386,10 +6534,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -6402,10 +6550,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -6448,13 +6596,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -6648,10 +6796,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -6844,10 +6992,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -6860,10 +7008,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", 
"maintMarginRatio": "0.01", @@ -6990,10 +7138,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -7006,10 +7154,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -7022,10 +7170,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -7068,13 +7216,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -7202,10 +7350,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -7300,10 +7448,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -7316,10 +7464,10 @@ "minNotional": 50000.0, "maxNotional": 150000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "150000", "notionalFloor": "50000", "maintMarginRatio": "0.025", @@ -7332,10 +7480,10 @@ "minNotional": 150000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "250000", "notionalFloor": "150000", "maintMarginRatio": "0.05", @@ -7394,13 +7542,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 2000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "2000000", "maintMarginRatio": "0.5", "cum": "654500.0" @@ -7512,10 +7660,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -7528,10 +7676,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -7740,10 +7888,10 @@ "minNotional": 0.0, 
"maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -7756,10 +7904,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -8080,10 +8228,10 @@ "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "3", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.01", @@ -8096,10 +8244,10 @@ "minNotional": 1000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maxLeverage": 15.0, "info": { "bracket": "4", - "initialLeverage": "25", + "initialLeverage": "15", "notionalCap": "5000000", "notionalFloor": "1000000", "maintMarginRatio": "0.02", @@ -8203,120 +8351,6 @@ } } ], - "ETHUSDT_220930": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 375000.0, - "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, - "info": { - "bracket": "1", - "initialLeverage": "25", - "notionalCap": "375000", - "notionalFloor": "0", - "maintMarginRatio": "0.02", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 375000.0, - "maxNotional": 2000000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "2", - "initialLeverage": "10", - "notionalCap": "2000000", - "notionalFloor": "375000", - "maintMarginRatio": "0.05", - "cum": "11250.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 2000000.0, - "maxNotional": 4000000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "3", - "initialLeverage": "5", - "notionalCap": "4000000", - "notionalFloor": "2000000", - "maintMarginRatio": "0.1", - "cum": "111250.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 4000000.0, - "maxNotional": 10000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, - "info": { - "bracket": "4", - "initialLeverage": "4", - "notionalCap": "10000000", - "notionalFloor": "4000000", - "maintMarginRatio": "0.125", - "cum": "211250.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 10000000.0, - "maxNotional": 20000000.0, - "maintenanceMarginRate": 0.15, - "maxLeverage": 3.0, - "info": { - "bracket": "5", - "initialLeverage": "3", - "notionalCap": "20000000", - "notionalFloor": "10000000", - "maintMarginRatio": "0.15", - "cum": "461250.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 20000000.0, - "maxNotional": 40000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, - "info": { - "bracket": "6", - "initialLeverage": "2", - "notionalCap": "40000000", - "notionalFloor": "20000000", - "maintMarginRatio": "0.25", - "cum": "2461250.0" - } - }, - { - "tier": 7.0, - "currency": "USDT", - "minNotional": 40000000.0, - "maxNotional": 400000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "7", - "initialLeverage": "1", - "notionalCap": "400000000", - "notionalFloor": "40000000", - "maintMarginRatio": "0.5", - "cum": "1.246125E7" - } - } - ], "ETHUSDT_221230": [ { "tier": 
1.0, @@ -8520,10 +8554,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -8536,10 +8570,10 @@ "minNotional": 50000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "2", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "250000", "notionalFloor": "50000", "maintMarginRatio": "0.02", @@ -8650,10 +8684,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -8666,10 +8700,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -8682,10 +8716,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -8748,10 +8782,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -8764,10 +8798,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -8780,10 +8814,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -8846,10 +8880,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -8862,10 +8896,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -8878,10 +8912,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -9138,13 +9172,13 @@ "tier": 1.0, "currency": "BUSD", "minNotional": 0.0, - "maxNotional": 100000.0, + "maxNotional": 15000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 8.0, 
"info": { "bracket": "1", - "initialLeverage": "20", - "notionalCap": "100000", + "initialLeverage": "8", + "notionalCap": "15000", "notionalFloor": "0", "maintMarginRatio": "0.025", "cum": "0.0" @@ -9153,81 +9187,81 @@ { "tier": 2.0, "currency": "BUSD", - "minNotional": 100000.0, - "maxNotional": 500000.0, + "minNotional": 15000.0, + "maxNotional": 50000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 6.0, "info": { "bracket": "2", - "initialLeverage": "10", - "notionalCap": "500000", - "notionalFloor": "100000", + "initialLeverage": "6", + "notionalCap": "50000", + "notionalFloor": "15000", "maintMarginRatio": "0.05", - "cum": "2500.0" + "cum": "375.0" } }, { "tier": 3.0, "currency": "BUSD", - "minNotional": 500000.0, - "maxNotional": 1000000.0, + "minNotional": 50000.0, + "maxNotional": 200000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "3", "initialLeverage": "5", - "notionalCap": "1000000", - "notionalFloor": "500000", + "notionalCap": "200000", + "notionalFloor": "50000", "maintMarginRatio": "0.1", - "cum": "27500.0" + "cum": "2875.0" } }, { "tier": 4.0, "currency": "BUSD", - "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "minNotional": 200000.0, + "maxNotional": 500000.0, "maintenanceMarginRate": 0.15, "maxLeverage": 3.0, "info": { "bracket": "4", "initialLeverage": "3", - "notionalCap": "2000000", - "notionalFloor": "1000000", + "notionalCap": "500000", + "notionalFloor": "200000", "maintMarginRatio": "0.15", - "cum": "77500.0" + "cum": "12875.0" } }, { "tier": 5.0, "currency": "BUSD", - "minNotional": 2000000.0, - "maxNotional": 5000000.0, + "minNotional": 500000.0, + "maxNotional": 800000.0, "maintenanceMarginRate": 0.25, "maxLeverage": 2.0, "info": { "bracket": "5", "initialLeverage": "2", - "notionalCap": "5000000", - "notionalFloor": "2000000", + "notionalCap": "800000", + "notionalFloor": "500000", "maintMarginRatio": "0.25", - "cum": "277500.0" + "cum": "62875.0" } }, { "tier": 6.0, "currency": "BUSD", - "minNotional": 5000000.0, - "maxNotional": 8000000.0, + "minNotional": 800000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "8000000", - "notionalFloor": "5000000", + "notionalCap": "1500000", + "notionalFloor": "800000", "maintMarginRatio": "0.5", - "cum": "1527500.0" + "cum": "262875.0" } } ], @@ -9236,96 +9270,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxNotional": 15000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 8.0, "info": { "bracket": "1", - "initialLeverage": "25", - "notionalCap": "5000", + "initialLeverage": "8", + "notionalCap": "15000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "minNotional": 15000.0, + "maxNotional": 50000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 6.0, "info": { "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" + "initialLeverage": "6", + "notionalCap": "50000", + "notionalFloor": "15000", + "maintMarginRatio": "0.05", + "cum": "375.0" } }, { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - 
"maxLeverage": 10.0, + "minNotional": 50000.0, + "maxNotional": 200000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "700.0" + "initialLeverage": "5", + "notionalCap": "200000", + "notionalFloor": "50000", + "maintMarginRatio": "0.1", + "cum": "2875.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "minNotional": 200000.0, + "maxNotional": 800000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2.0, "info": { "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5700.0" + "initialLeverage": "2", + "notionalCap": "800000", + "notionalFloor": "200000", + "maintMarginRatio": "0.125", + "cum": "7875.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11950.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "minNotional": 800000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", - "notionalCap": "5000000", - "notionalFloor": "1000000", + "notionalCap": "1500000", + "notionalFloor": "800000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "307875.0" } } ], @@ -9418,10 +9436,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -9434,10 +9452,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -9450,10 +9468,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -9676,13 +9694,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -9778,10 +9796,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -9892,10 +9910,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", 
"notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -9990,10 +10008,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10006,10 +10024,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -10022,10 +10040,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -10088,10 +10106,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10186,10 +10204,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10202,10 +10220,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -10218,10 +10236,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -10284,10 +10302,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10464,10 +10482,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10480,10 +10498,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -10496,10 +10514,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -10542,13 +10560,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 
30000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "30000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -10562,10 +10580,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10578,10 +10596,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -10594,10 +10612,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -10640,13 +10658,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -10660,10 +10678,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10676,10 +10694,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -10692,10 +10710,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -10856,10 +10874,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -10954,10 +10972,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -11052,10 +11070,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -11068,10 +11086,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + 
"initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -11084,10 +11102,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -11248,10 +11266,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -11264,10 +11282,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -11280,10 +11298,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -11340,6 +11358,88 @@ } ], "KLAY/USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "1", + "initialLeverage": "20", + "notionalCap": "25000", + "notionalFloor": "0", + "maintMarginRatio": "0.025", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "2", + "initialLeverage": "10", + "notionalCap": "100000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "625.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 100000.0, + "maxNotional": 250000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "3", + "initialLeverage": "5", + "notionalCap": "250000", + "notionalFloor": "100000", + "maintMarginRatio": "0.1", + "cum": "5625.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 250000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2.0, + "info": { + "bracket": "4", + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "250000", + "maintMarginRatio": "0.125", + "cum": "11875.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 5000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "5", + "initialLeverage": "1", + "notionalCap": "5000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.5", + "cum": "386875.0" + } + } + ], + "KNC/USDT": [ { "tier": 1.0, "currency": "USDT", @@ -11437,104 +11537,6 @@ } } ], - "KNC/USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, - "info": { - "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.01", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - 
"initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "700.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5700.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11950.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "5000000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "386950.0" - } - } - ], "KSM/USDT": [ { "tier": 1.0, @@ -11542,10 +11544,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -11722,10 +11724,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -11738,10 +11740,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -11754,10 +11756,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -11902,10 +11904,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -11918,10 +11920,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -11934,10 +11936,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", 
"maintMarginRatio": "0.05", @@ -12082,10 +12084,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -12098,10 +12100,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -12226,96 +12228,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", - "notionalCap": "5000", + "initialLeverage": "20", + "notionalCap": "25000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { - "bracket": "3", + "bracket": "2", "initialLeverage": "10", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", - "cum": "700.0" + "cum": "625.0" } }, { - "tier": 4.0, + "tier": 3.0, "currency": "USDT", "minNotional": 100000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { - "bracket": "4", + "bracket": "3", "initialLeverage": "5", "notionalCap": "250000", "notionalFloor": "100000", "maintMarginRatio": "0.1", - "cum": "5700.0" + "cum": "5625.0" } }, { - "tier": 5.0, + "tier": 4.0, "currency": "USDT", "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 2.0, "info": { - "bracket": "5", + "bracket": "4", "initialLeverage": "2", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.125", - "cum": "11950.0" + "cum": "11875.0" } }, { - "tier": 6.0, + "tier": 5.0, "currency": "USDT", "minNotional": 1000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", "notionalCap": "5000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "386875.0" } } ], @@ -12326,10 +12312,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -12342,10 +12328,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -12358,10 +12344,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 
8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -12604,10 +12590,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "25", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -12620,10 +12606,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -12636,10 +12622,10 @@ "minNotional": 50000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maxLeverage": 15.0, "info": { "bracket": "3", - "initialLeverage": "25", + "initialLeverage": "15", "notionalCap": "250000", "notionalFloor": "50000", "maintMarginRatio": "0.02", @@ -12730,13 +12716,13 @@ "tier": 9.0, "currency": "USDT", "minNotional": 20000000.0, - "maxNotional": 50000000.0, + "maxNotional": 32000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "9", "initialLeverage": "1", - "notionalCap": "50000000", + "notionalCap": "32000000", "notionalFloor": "20000000", "maintMarginRatio": "0.5", "cum": "6233035.0" @@ -12930,10 +12916,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -13042,96 +13028,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", - "notionalCap": "5000", + "initialLeverage": "20", + "notionalCap": "25000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { - "bracket": "3", + "bracket": "2", "initialLeverage": "10", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", - "cum": "700.0" + "cum": "625.0" } }, { - "tier": 4.0, + "tier": 3.0, "currency": "USDT", "minNotional": 100000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { - "bracket": "4", + "bracket": "3", "initialLeverage": "5", "notionalCap": "250000", "notionalFloor": "100000", "maintMarginRatio": "0.1", - "cum": "5700.0" + "cum": "5625.0" } }, { - "tier": 5.0, + "tier": 4.0, "currency": "USDT", "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 2.0, "info": { - "bracket": "5", + "bracket": "4", "initialLeverage": "2", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.125", - "cum": "11950.0" + 
"cum": "11875.0" } }, { - "tier": 6.0, + "tier": 5.0, "currency": "USDT", "minNotional": 1000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", "notionalCap": "5000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "386875.0" } } ], @@ -13224,10 +13194,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -13338,10 +13308,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -13436,10 +13406,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -13616,10 +13586,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -13730,10 +13700,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -13828,10 +13798,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -13844,10 +13814,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -13860,10 +13830,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -13926,10 +13896,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -14024,10 +13994,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -14040,10 +14010,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { 
"bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -14056,10 +14026,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -14122,10 +14092,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -14138,10 +14108,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -14154,10 +14124,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -14200,13 +14170,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -14220,10 +14190,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -14236,10 +14206,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -14252,10 +14222,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -14318,10 +14288,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -14687,6 +14657,104 @@ } } ], + "QNT/USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.01, + "maxLeverage": 20.0, + "info": { + "bracket": "1", + "initialLeverage": "20", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.01", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 10.0, + "info": { + "bracket": "2", + "initialLeverage": "10", + "notionalCap": "25000", + 
"notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": "75.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 8.0, + "info": { + "bracket": "3", + "initialLeverage": "8", + "notionalCap": "100000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "700.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 100000.0, + "maxNotional": 250000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "250000", + "notionalFloor": "100000", + "maintMarginRatio": "0.1", + "cum": "5700.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 250000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2.0, + "info": { + "bracket": "5", + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "250000", + "maintMarginRatio": "0.125", + "cum": "11950.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 5000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "6", + "initialLeverage": "1", + "notionalCap": "5000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.5", + "cum": "386950.0" + } + } + ], "QTUM/USDT": [ { "tier": 1.0, @@ -14694,10 +14762,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -14988,10 +15056,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -15004,10 +15072,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -15020,10 +15088,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -15066,13 +15134,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "1500000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -15084,96 +15152,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", + "initialLeverage": "20", + "notionalCap": "25000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - 
"maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { - "bracket": "3", + "bracket": "2", "initialLeverage": "10", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", - "cum": "700.0" + "cum": "625.0" } }, { - "tier": 4.0, + "tier": 3.0, "currency": "USDT", "minNotional": 100000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { - "bracket": "4", + "bracket": "3", "initialLeverage": "5", "notionalCap": "250000", "notionalFloor": "100000", "maintMarginRatio": "0.1", - "cum": "5700.0" + "cum": "5625.0" } }, { - "tier": 5.0, + "tier": 4.0, "currency": "USDT", "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 2.0, "info": { - "bracket": "5", + "bracket": "4", "initialLeverage": "2", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.125", - "cum": "11950.0" + "cum": "11875.0" } }, { - "tier": 6.0, + "tier": 5.0, "currency": "USDT", "minNotional": 1000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", "notionalCap": "5000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "386875.0" } } ], @@ -15184,10 +15236,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -15200,10 +15252,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -15216,10 +15268,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -15281,14 +15333,14 @@ "currency": "USDT", "minNotional": 0.0, "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, + "maintenanceMarginRate": 0.02, "maxLeverage": 25.0, "info": { "bracket": "1", "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.02", "cum": "0.0" } }, @@ -15305,7 +15357,7 @@ "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", - "cum": "75.0" + "cum": "25.0" } }, { @@ -15321,7 +15373,7 @@ "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", - "cum": "700.0" + "cum": "650.0" } }, { @@ -15337,7 +15389,7 @@ "notionalCap": "250000", "notionalFloor": "100000", "maintMarginRatio": "0.1", - "cum": "5700.0" + "cum": "5650.0" } }, { @@ -15353,7 +15405,7 @@ "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.125", - "cum": "11950.0" + "cum": "11900.0" } }, { @@ -15369,7 +15421,7 @@ "notionalCap": "5000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - 
"cum": "386950.0" + "cum": "386900.0" } } ], @@ -15380,10 +15432,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -15478,10 +15530,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -15494,10 +15546,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -15510,10 +15562,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -15658,10 +15710,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -15868,96 +15920,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxNotional": 15000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 8.0, "info": { "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", + "initialLeverage": "8", + "notionalCap": "15000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "minNotional": 15000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 7.0, "info": { "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" + "initialLeverage": "7", + "notionalCap": "100000", + "notionalFloor": "15000", + "maintMarginRatio": "0.05", + "cum": "375.0" } }, { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "700.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", "minNotional": 100000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { - "bracket": "4", + "bracket": "3", "initialLeverage": "5", "notionalCap": "250000", "notionalFloor": "100000", "maintMarginRatio": "0.1", - "cum": "5700.0" + "cum": "5375.0" } }, { - "tier": 5.0, + "tier": 4.0, "currency": "USDT", "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 2.0, "info": { - "bracket": "5", + "bracket": "4", "initialLeverage": "2", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.125", - "cum": "11950.0" + "cum": 
"11625.0" } }, { - "tier": 6.0, + "tier": 5.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "386625.0" } } ], @@ -15968,10 +16004,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -16164,10 +16200,10 @@ "minNotional": 0.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 11.0, "info": { "bracket": "1", - "initialLeverage": "20", + "initialLeverage": "11", "notionalCap": "100000", "notionalFloor": "0", "maintMarginRatio": "0.025", @@ -16260,30 +16296,30 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 50000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxNotional": 150000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 12.0, "info": { "bracket": "1", - "initialLeverage": "50", - "notionalCap": "50000", + "initialLeverage": "12", + "notionalCap": "150000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.02", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 50000.0, + "minNotional": 150000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 11.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "11", "notionalCap": "250000", - "notionalFloor": "50000", + "notionalFloor": "150000", "maintMarginRatio": "0.025", "cum": "750.0" } @@ -16356,13 +16392,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 10000000.0, - "maxNotional": 20000000.0, + "maxNotional": 11000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "20000000", + "notionalCap": "11000000", "notionalFloor": "10000000", "maintMarginRatio": "0.5", "cum": "3232000.0" @@ -16376,10 +16412,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -16392,10 +16428,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -16408,10 +16444,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -16472,96 +16508,80 @@ "tier": 1.0, "currency": "USDT", "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxNotional": 15000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 8.0, "info": { "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", + "initialLeverage": "8", + 
"notionalCap": "15000", "notionalFloor": "0", - "maintMarginRatio": "0.01", + "maintMarginRatio": "0.025", "cum": "0.0" } }, { "tier": 2.0, "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "minNotional": 15000.0, + "maxNotional": 50000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 6.0, "info": { "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" + "initialLeverage": "6", + "notionalCap": "50000", + "notionalFloor": "15000", + "maintMarginRatio": "0.05", + "cum": "375.0" } }, { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "minNotional": 50000.0, + "maxNotional": 200000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "700.0" + "initialLeverage": "5", + "notionalCap": "200000", + "notionalFloor": "50000", + "maintMarginRatio": "0.1", + "cum": "2875.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "minNotional": 200000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2.0, "info": { "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5700.0" + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "200000", + "maintMarginRatio": "0.125", + "cum": "7875.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11950.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "5", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "1500000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386950.0" + "cum": "382875.0" } } ], @@ -16572,10 +16592,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -16588,10 +16608,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -16604,10 +16624,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -16670,10 +16690,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - 
"initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -16686,10 +16706,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -16702,10 +16722,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -16748,13 +16768,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -16768,10 +16788,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -16964,10 +16984,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -17062,10 +17082,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -17078,10 +17098,10 @@ "minNotional": 50000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "2", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "250000", "notionalFloor": "50000", "maintMarginRatio": "0.02", @@ -17372,10 +17392,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -17388,10 +17408,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -17404,10 +17424,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -17470,10 +17490,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -17486,10 
+17506,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -17502,10 +17522,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -17650,10 +17670,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 25.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -17666,10 +17686,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 22.0, + "maxLeverage": 25.0, "info": { "bracket": "2", - "initialLeverage": "22", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -18254,10 +18274,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -18352,10 +18372,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -18368,10 +18388,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -18384,10 +18404,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -18450,10 +18470,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -18466,10 +18486,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -18482,10 +18502,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -18528,13 +18548,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { 
"bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386950.0" @@ -18548,10 +18568,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -18662,10 +18682,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -18874,10 +18894,10 @@ "minNotional": 0.0, "maxNotional": 10000.0, "maintenanceMarginRate": 0.0065, - "maxLeverage": 75.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "50", "notionalCap": "10000", "notionalFloor": "0", "maintMarginRatio": "0.0065", @@ -18890,10 +18910,10 @@ "minNotional": 10000.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 40.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "40", "notionalCap": "50000", "notionalFloor": "10000", "maintMarginRatio": "0.01", @@ -19020,10 +19040,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "20", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -19036,10 +19056,10 @@ "minNotional": 50000.0, "maxNotional": 250000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "250000", "notionalFloor": "50000", "maintMarginRatio": "0.025", @@ -19052,10 +19072,10 @@ "minNotional": 250000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "1000000", "notionalFloor": "250000", "maintMarginRatio": "0.05", @@ -19114,13 +19134,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 10000000.0, - "maxNotional": 20000000.0, + "maxNotional": 15000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "20000000", + "notionalCap": "15000000", "notionalFloor": "10000000", "maintMarginRatio": "0.5", "cum": "3232000.0" @@ -19134,10 +19154,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -19232,10 +19252,10 @@ "minNotional": 0.0, "maxNotional": 50000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "50000", "notionalFloor": "0", "maintMarginRatio": "0.01", @@ -19346,10 +19366,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": 
"0", "maintMarginRatio": "0.01", @@ -19438,6 +19458,104 @@ } ], "ZIL/USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.01, + "maxLeverage": 20.0, + "info": { + "bracket": "1", + "initialLeverage": "20", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.01", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 10.0, + "info": { + "bracket": "2", + "initialLeverage": "10", + "notionalCap": "25000", + "notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": "75.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 8.0, + "info": { + "bracket": "3", + "initialLeverage": "8", + "notionalCap": "100000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "700.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 100000.0, + "maxNotional": 250000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "250000", + "notionalFloor": "100000", + "maintMarginRatio": "0.1", + "cum": "5700.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 250000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2.0, + "info": { + "bracket": "5", + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "250000", + "maintMarginRatio": "0.125", + "cum": "11950.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 5000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "6", + "initialLeverage": "1", + "notionalCap": "5000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.5", + "cum": "386950.0" + } + } + ], + "ZRX/USDT": [ { "tier": 1.0, "currency": "USDT", @@ -19534,103 +19652,5 @@ "cum": "386950.0" } } - ], - "ZRX/USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, - "info": { - "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.01", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "700.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5700.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - 
"initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11950.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "5000000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "386950.0" - } - } ] -} +} \ No newline at end of file From 048119ad3d327b36cfb0b8502ea72aeeea453498 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 14:20:41 +0100 Subject: [PATCH 174/232] Improve doc wording around informative pair candle types closes #7792 --- docs/strategy-customization.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/strategy-customization.md b/docs/strategy-customization.md index 3e8bab8ee..c006bf12c 100644 --- a/docs/strategy-customization.md +++ b/docs/strategy-customization.md @@ -446,15 +446,17 @@ A full sample can be found [in the DataProvider section](#complete-data-provider ??? Note "Alternative candle types" Informative_pairs can also provide a 3rd tuple element defining the candle type explicitly. - Availability of alternative candle-types will depend on the trading-mode and the exchange. Details about this can be found in the exchange documentation. + Availability of alternative candle-types will depend on the trading-mode and the exchange. + In general, spot pairs cannot be used in futures markets, and futures candles can't be used as informative pairs for spot bots. + Details about this may vary, if they do, this can be found in the exchange documentation. ``` python def informative_pairs(self): return [ - ("ETH/USDT", "5m", ""), # Uses default candletype, depends on trading_mode - ("ETH/USDT", "5m", "spot"), # Forces usage of spot candles - ("BTC/TUSD", "15m", "futures"), # Uses futures candles - ("BTC/TUSD", "15m", "mark"), # Uses mark candles + ("ETH/USDT", "5m", ""), # Uses default candletype, depends on trading_mode (recommended) + ("ETH/USDT", "5m", "spot"), # Forces usage of spot candles (only valid for bots running on spot markets). + ("BTC/TUSD", "15m", "futures"), # Uses futures candles (only bots with `trading_mode=futures`) + ("BTC/TUSD", "15m", "mark"), # Uses mark candles (only bots with `trading_mode=futures`) ] ``` *** From 5e6cda11efdc1d03a09a3db30ba42dab646c8270 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 14:43:43 +0100 Subject: [PATCH 175/232] Update method name for trade fee updating --- freqtrade/freqtradebot.py | 4 ++-- tests/test_freqtradebot.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index 77b099d80..6ffc6a755 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -191,7 +191,7 @@ class FreqtradeBot(LoggingMixin): # Check whether markets have to be reloaded and reload them when it's needed self.exchange.reload_markets() - self.update_closed_trades_without_assigned_fees() + self.update_trades_without_assigned_fees() # Query trades from persistence layer trades = Trade.get_open_trades() @@ -354,7 +354,7 @@ class FreqtradeBot(LoggingMixin): if self.trading_mode == TradingMode.FUTURES: self._schedule.run_pending() - def update_closed_trades_without_assigned_fees(self) -> None: + def update_trades_without_assigned_fees(self) -> None: """ Update closed trades without close fees assigned. 
Only acts when Orders are in the database, otherwise the last order-id is unknown. diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index 6b47dc1d1..83b7e9b27 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -5046,7 +5046,7 @@ def test_startup_backpopulate_precision(mocker, default_conf_usdt, fee, caplog): @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("is_short", [False, True]) -def test_update_closed_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_short): +def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_short): freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) def patch_with_fee(order): @@ -5075,7 +5075,7 @@ def test_update_closed_trades_without_assigned_fees(mocker, default_conf_usdt, f assert trade.fee_close_cost is None assert trade.fee_close_currency is None - freqtrade.update_closed_trades_without_assigned_fees() + freqtrade.update_trades_without_assigned_fees() # Does nothing for dry-run trades = Trade.get_trades().all() @@ -5088,7 +5088,7 @@ def test_update_closed_trades_without_assigned_fees(mocker, default_conf_usdt, f freqtrade.config['dry_run'] = False - freqtrade.update_closed_trades_without_assigned_fees() + freqtrade.update_trades_without_assigned_fees() trades = Trade.get_trades().all() assert len(trades) == MOCK_TRADE_COUNT @@ -5551,7 +5551,7 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: assert trade.stake_amount == 110 # Assume it does nothing since order is closed and trade is open - freqtrade.update_closed_trades_without_assigned_fees() + freqtrade.update_trades_without_assigned_fees() trade = Trade.query.first() assert trade @@ -5622,7 +5622,7 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: mocker.patch('freqtrade.exchange.Exchange.create_order', fetch_order_mm) mocker.patch('freqtrade.exchange.Exchange.fetch_order', fetch_order_mm) mocker.patch('freqtrade.exchange.Exchange.fetch_order_or_stoploss_order', fetch_order_mm) - freqtrade.update_closed_trades_without_assigned_fees() + freqtrade.update_trades_without_assigned_fees() orders = Order.query.all() assert orders @@ -5839,7 +5839,7 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None: assert trade.stake_amount == bid * amount # Assume it does nothing since order is closed and trade is open - freqtrade.update_closed_trades_without_assigned_fees() + freqtrade.update_trades_without_assigned_fees() trade = Trade.query.first() assert trade From c593cdc4382ca0070f1a78f5a85a34e6e8ab777b Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 14:48:06 +0100 Subject: [PATCH 176/232] Improve type hints --- freqtrade/freqtradebot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index 6ffc6a755..c8544ac13 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -194,7 +194,7 @@ class FreqtradeBot(LoggingMixin): self.update_trades_without_assigned_fees() # Query trades from persistence layer - trades = Trade.get_open_trades() + trades: List[Trade] = Trade.get_open_trades() self.active_pair_whitelist = self._refresh_active_whitelist(trades) @@ -982,7 +982,7 @@ class FreqtradeBot(LoggingMixin): # SELL / exit positions / close trades logic and methods # - def exit_positions(self, trades: List[Any]) -> int: + def exit_positions(self, trades: List[Trade]) -> int: """ Tries to execute exit orders for open trades (positions) """ From 
1b3e62bcbc0f6a06ecb2b8871be7f8880ef3b82d Mon Sep 17 00:00:00 2001
From: Matthias 
Date: Fri, 25 Nov 2022 14:50:48 +0100
Subject: [PATCH 177/232] Lock execute_entry to prevent timing hiccups

---
 freqtrade/freqtradebot.py | 19 ++++++++++---------
 freqtrade/rpc/rpc.py      | 23 ++++++++++++-----------
 2 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py
index c8544ac13..e47c4f7a3 100644
--- a/freqtrade/freqtradebot.py
+++ b/freqtrade/freqtradebot.py
@@ -381,15 +381,16 @@ class FreqtradeBot(LoggingMixin):
 
         trades = Trade.get_open_trades_without_assigned_fees()
         for trade in trades:
-            if trade.is_open and not trade.fee_updated(trade.entry_side):
-                order = trade.select_order(trade.entry_side, False)
-                open_order = trade.select_order(trade.entry_side, True)
-                if order and open_order is None:
-                    logger.info(
-                        f"Updating {trade.entry_side}-fee on trade {trade}"
-                        f"for order {order.order_id}."
-                    )
-                    self.update_trade_state(trade, order.order_id, send_msg=False)
+            with self._exit_lock:
+                if trade.is_open and not trade.fee_updated(trade.entry_side):
+                    order = trade.select_order(trade.entry_side, False)
+                    open_order = trade.select_order(trade.entry_side, True)
+                    if order and open_order is None:
+                        logger.info(
+                            f"Updating {trade.entry_side}-fee on trade {trade}"
+                            f"for order {order.order_id}."
+                        )
+                        self.update_trade_state(trade, order.order_id, send_msg=False)
 
     def handle_insufficient_funds(self, trade: Trade):
         """
diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py
index 1d3f36844..011543a09 100644
--- a/freqtrade/rpc/rpc.py
+++ b/freqtrade/rpc/rpc.py
@@ -789,17 +789,18 @@ class RPC:
         if not order_type:
             order_type = self._freqtrade.strategy.order_types.get(
                 'force_entry', self._freqtrade.strategy.order_types['entry'])
-        if self._freqtrade.execute_entry(pair, stake_amount, price,
-                                         ordertype=order_type, trade=trade,
-                                         is_short=is_short,
-                                         enter_tag=enter_tag,
-                                         leverage_=leverage,
-                                         ):
-            Trade.commit()
-            trade = Trade.get_trades([Trade.is_open.is_(True), Trade.pair == pair]).first()
-            return trade
-        else:
-            raise RPCException(f'Failed to enter position for {pair}.')
+        with self._freqtrade._exit_lock:
+            if self._freqtrade.execute_entry(pair, stake_amount, price,
+                                             ordertype=order_type, trade=trade,
+                                             is_short=is_short,
+                                             enter_tag=enter_tag,
+                                             leverage_=leverage,
+                                             ):
+                Trade.commit()
+                trade = Trade.get_trades([Trade.is_open.is_(True), Trade.pair == pair]).first()
+                return trade
+            else:
+                raise RPCException(f'Failed to enter position for {pair}.')
 
     def _rpc_delete(self, trade_id: int) -> Dict[str, Union[str, int]]:
         """

From 0f97ef0d7bb0616e1e8bbfe679d4af047b97325b Mon Sep 17 00:00:00 2001
From: Matthias 
Date: Fri, 25 Nov 2022 16:08:33 +0100
Subject: [PATCH 178/232] Reset stoploss_order_id when order is canceled

closes #7766
---
 freqtrade/freqtradebot.py  | 5 +++--
 tests/test_freqtradebot.py | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py
index e47c4f7a3..34d18b3d8 100644
--- a/freqtrade/freqtradebot.py
+++ b/freqtrade/freqtradebot.py
@@ -827,6 +827,8 @@ class FreqtradeBot(LoggingMixin):
                 co = self.exchange.cancel_stoploss_order_with_result(
                     trade.stoploss_order_id, trade.pair, trade.amount)
                 trade.update_order(co)
+                # Reset stoploss order id.
+ trade.stoploss_order_id = None except InvalidOrderException: logger.exception(f"Could not cancel stoploss order {trade.stoploss_order_id}") return trade @@ -1011,7 +1013,7 @@ class FreqtradeBot(LoggingMixin): def handle_trade(self, trade: Trade) -> bool: """ - Sells/exits_short the current pair if the threshold is reached and updates the trade record. + Exits the current pair if the threshold is reached and updates the trade record. :return: True if trade has been sold/exited_short, False otherwise """ if not trade.is_open: @@ -1168,7 +1170,6 @@ class FreqtradeBot(LoggingMixin): if self.create_stoploss_order(trade=trade, stop_price=trade.stoploss_or_liquidation): return False else: - trade.stoploss_order_id = None logger.warning('Stoploss order was cancelled, but unable to recreate one.') # Finally we check if stoploss on exchange should be moved up because of trailing. diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index 83b7e9b27..b71b5b387 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -1498,6 +1498,7 @@ def test_handle_stoploss_on_exchange_trailing( }) ) assert freqtrade.handle_trade(trade) is True + assert trade.stoploss_order_id is None @pytest.mark.parametrize("is_short", [False, True]) From 8ee8b6e9432bb3d0282838278690d3464d0f8f55 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 16:31:21 +0100 Subject: [PATCH 179/232] Improve hyperopt list output closes #7789 --- freqtrade/optimize/hyperopt_tools.py | 9 ++++++--- freqtrade/optimize/optimize_reports.py | 10 +++++----- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/freqtrade/optimize/hyperopt_tools.py b/freqtrade/optimize/hyperopt_tools.py index 393c055c4..f8e9ad617 100755 --- a/freqtrade/optimize/hyperopt_tools.py +++ b/freqtrade/optimize/hyperopt_tools.py @@ -17,6 +17,7 @@ from freqtrade.enums import HyperoptState from freqtrade.exceptions import OperationalException from freqtrade.misc import deep_merge_dicts, round_coin_value, round_dict, safe_value_fallback2 from freqtrade.optimize.hyperopt_epoch_filters import hyperopt_filter_epochs +from freqtrade.optimize.optimize_reports import generate_wins_draws_losses logger = logging.getLogger(__name__) @@ -325,8 +326,10 @@ class HyperoptTools(): # New mode, using backtest result for metrics trials['results_metrics.winsdrawslosses'] = trials.apply( - lambda x: f"{x['results_metrics.wins']} {x['results_metrics.draws']:>4} " - f"{x['results_metrics.losses']:>4}", axis=1) + lambda x: generate_wins_draws_losses( + x['results_metrics.wins'], x['results_metrics.draws'], + x['results_metrics.losses'] + ), axis=1) trials = trials[['Best', 'current_epoch', 'results_metrics.total_trades', 'results_metrics.winsdrawslosses', @@ -337,7 +340,7 @@ class HyperoptTools(): 'loss', 'is_initial_point', 'is_random', 'is_best']] trials.columns = [ - 'Best', 'Epoch', 'Trades', ' Win Draw Loss', 'Avg profit', + 'Best', 'Epoch', 'Trades', ' Win Draw Loss Win%', 'Avg profit', 'Total profit', 'Profit', 'Avg duration', 'max_drawdown', 'max_drawdown_account', 'max_drawdown_abs', 'Objective', 'is_initial_point', 'is_random', 'is_best' ] diff --git a/freqtrade/optimize/optimize_reports.py b/freqtrade/optimize/optimize_reports.py index c406f866b..8ad37e7d8 100644 --- a/freqtrade/optimize/optimize_reports.py +++ b/freqtrade/optimize/optimize_reports.py @@ -86,7 +86,7 @@ def _get_line_header(first_column: str, stake_currency: str, 'Win Draw Loss Win%'] -def _generate_wins_draws_losses(wins, draws, losses): +def 
generate_wins_draws_losses(wins, draws, losses): if wins > 0 and losses == 0: wl_ratio = '100' elif wins == 0: @@ -600,7 +600,7 @@ def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: st output = [[ t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'], t['profit_total_pct'], t['duration_avg'], - _generate_wins_draws_losses(t['wins'], t['draws'], t['losses']) + generate_wins_draws_losses(t['wins'], t['draws'], t['losses']) ] for t in pair_results] # Ignore type as floatfmt does allow tuples but mypy does not know that return tabulate(output, headers=headers, @@ -626,7 +626,7 @@ def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_curren output = [[ t.get('exit_reason', t.get('sell_reason')), t['trades'], - _generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), + generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), t['profit_mean_pct'], t['profit_sum_pct'], round_coin_value(t['profit_total_abs'], stake_currency, False), t['profit_total_pct'], @@ -656,7 +656,7 @@ def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_curr t['profit_total_abs'], t['profit_total_pct'], t['duration_avg'], - _generate_wins_draws_losses( + generate_wins_draws_losses( t['wins'], t['draws'], t['losses'])] for t in tag_results] @@ -715,7 +715,7 @@ def text_table_strategy(strategy_results, stake_currency: str) -> str: output = [[ t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'], t['profit_total_pct'], t['duration_avg'], - _generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown] + generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown] for t, drawdown in zip(strategy_results, drawdown)] # Ignore type as floatfmt does allow tuples but mypy does not know that return tabulate(output, headers=headers, From 8c014bd365adbada47c3bb1f869effbf60b69749 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 16:36:41 +0100 Subject: [PATCH 180/232] Export trade-counts to csv closes #7789 --- freqtrade/optimize/hyperopt_tools.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/freqtrade/optimize/hyperopt_tools.py b/freqtrade/optimize/hyperopt_tools.py index f8e9ad617..1687502ad 100755 --- a/freqtrade/optimize/hyperopt_tools.py +++ b/freqtrade/optimize/hyperopt_tools.py @@ -473,6 +473,7 @@ class HyperoptTools(): 'results_metrics.profit_total', 'Stake currency', 'results_metrics.profit_total_abs', 'results_metrics.holding_avg', + 'results_metrics.trade_count_long', 'results_metrics.trade_count_short', 'loss', 'is_initial_point', 'is_best'] perc_multi = 100 @@ -480,7 +481,9 @@ class HyperoptTools(): trials = trials[base_metrics + param_metrics] base_columns = ['Best', 'Epoch', 'Trades', 'Avg profit', 'Median profit', 'Total profit', - 'Stake currency', 'Profit', 'Avg duration', 'Objective', + 'Stake currency', 'Profit', 'Avg duration', + 'Trade count long', 'Trade count short', + 'Objective', 'is_initial_point', 'is_best'] param_columns = list(results[0]['params_dict'].keys()) trials.columns = base_columns + param_columns From 79c041b62d5e5ab89969b656aa058f2c674b6334 Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 16:56:47 +0100 Subject: [PATCH 181/232] Update tests for new export format --- freqtrade/optimize/hyperopt_tools.py | 3 +-- tests/commands/test_commands.py | 2 +- tests/conftest.py | 24 ++++++++++++------------ 3 files changed, 14 insertions(+), 15 deletions(-) diff --git 
a/freqtrade/optimize/hyperopt_tools.py b/freqtrade/optimize/hyperopt_tools.py index 1687502ad..7007ec55e 100755 --- a/freqtrade/optimize/hyperopt_tools.py +++ b/freqtrade/optimize/hyperopt_tools.py @@ -470,8 +470,7 @@ class HyperoptTools(): base_metrics = ['Best', 'current_epoch', 'results_metrics.total_trades', 'results_metrics.profit_mean', 'results_metrics.profit_median', - 'results_metrics.profit_total', - 'Stake currency', + 'results_metrics.profit_total', 'Stake currency', 'results_metrics.profit_total_abs', 'results_metrics.holding_avg', 'results_metrics.trade_count_long', 'results_metrics.trade_count_short', 'loss', 'is_initial_point', 'is_best'] diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index d3bceb004..a1d73f7ef 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -1271,7 +1271,7 @@ def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmpdir): assert csv_file.is_file() line = csv_file.read_text() assert ('Best,1,2,-1.25%,-1.2222,-0.00125625,,-2.51,"3,930.0 m",0.43662' in line - or "Best,1,2,-1.25%,-1.2222,-0.00125625,,-2.51,2 days 17:30:00,0.43662" in line) + or "Best,1,2,-1.25%,-1.2222,-0.00125625,,-2.51,2 days 17:30:00,2,0,0.43662" in line) csv_file.unlink() diff --git a/tests/conftest.py b/tests/conftest.py index d228c64b7..f3fc908e7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2679,7 +2679,7 @@ def saved_hyperopt_results(): 'params_dict': { 'mfi-value': 15, 'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1190, 'roi_t2': 541, 'roi_t3': 408, 'roi_p1': 0.026035863879169705, 'roi_p2': 0.12508730043628782, 'roi_p3': 0.27766427921605896, 'stoploss': -0.2562930402099556}, # noqa: E501 'params_details': {'buy': {'mfi-value': 15, 'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4287874435315165, 408: 0.15112316431545753, 949: 0.026035863879169705, 2139: 0}, 'stoploss': {'stoploss': -0.2562930402099556}}, # noqa: E501 - 'results_metrics': {'total_trades': 2, 'wins': 0, 'draws': 0, 'losses': 2, 'profit_mean': -0.01254995, 'profit_median': -0.012222, 'profit_total': -0.00125625, 'profit_total_abs': -2.50999, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=3930.0), 'stake_currency': 'BTC', 'strategy_name': 'SampleStrategy'}, # noqa: E501 + 'results_metrics': {'total_trades': 2, 'trade_count_long': 2, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 2, 'profit_mean': -0.01254995, 'profit_median': -0.012222, 'profit_total': -0.00125625, 'profit_total_abs': -2.50999, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=3930.0), 'stake_currency': 'BTC', 'strategy_name': 'SampleStrategy'}, # noqa: E501 'results_explanation': ' 2 trades. Avg profit -1.25%. 
Total profit -0.00125625 BTC ( -2.51Σ%). Avg duration 3930.0 min.', # noqa: E501 'total_profit': -0.00125625, 'current_epoch': 1, @@ -2696,7 +2696,7 @@ def saved_hyperopt_results(): 'sell': {'sell-mfi-value': 96, 'sell-fastd-value': 68, 'sell-adx-value': 63, 'sell-rsi-value': 81, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, # noqa: E501 'roi': {0: 0.4449309386008759, 140: 0.11955965746663, 823: 0.06403981740598495, 1157: 0}, # noqa: E501 'stoploss': {'stoploss': -0.338070047333259}}, - 'results_metrics': {'total_trades': 1, 'wins': 0, 'draws': 0, 'losses': 1, 'profit_mean': 0.012357, 'profit_median': -0.012222, 'profit_total': 6.185e-05, 'profit_total_abs': 0.12357, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=1200.0)}, # noqa: E501 + 'results_metrics': {'total_trades': 1, 'trade_count_long': 1, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 1, 'profit_mean': 0.012357, 'profit_median': -0.012222, 'profit_total': 6.185e-05, 'profit_total_abs': 0.12357, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=1200.0)}, # noqa: E501 'results_explanation': ' 1 trades. Avg profit 0.12%. Total profit 0.00006185 BTC ( 0.12Σ%). Avg duration 1200.0 min.', # noqa: E501 'total_profit': 6.185e-05, 'current_epoch': 2, @@ -2707,7 +2707,7 @@ def saved_hyperopt_results(): 'loss': 14.241196856510731, 'params_dict': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 889, 'roi_t2': 533, 'roi_t3': 263, 'roi_p1': 0.04759065393663096, 'roi_p2': 0.1488819964638463, 'roi_p3': 0.4102801822104605, 'stoploss': -0.05394588767607611}, # noqa: E501 'params_details': {'buy': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.6067528326109377, 263: 0.19647265040047726, 796: 0.04759065393663096, 1685: 0}, 'stoploss': {'stoploss': -0.05394588767607611}}, # noqa: E501 - 'results_metrics': {'total_trades': 621, 'wins': 320, 'draws': 0, 'losses': 301, 'profit_mean': -0.043883302093397747, 'profit_median': -0.012222, 'profit_total': -0.13639474, 'profit_total_abs': -272.515306, 'max_drawdown': 0.25, 'max_drawdown_abs': -272.515306, 'holding_avg': timedelta(minutes=1691.207729468599)}, # noqa: E501 + 'results_metrics': {'total_trades': 621, 'trade_count_long': 621, 'trade_count_short': 0, 'wins': 320, 'draws': 0, 'losses': 301, 'profit_mean': -0.043883302093397747, 'profit_median': -0.012222, 'profit_total': -0.13639474, 'profit_total_abs': -272.515306, 'max_drawdown': 0.25, 'max_drawdown_abs': -272.515306, 'holding_avg': timedelta(minutes=1691.207729468599)}, # noqa: E501 'results_explanation': ' 621 trades. Avg profit -0.44%. Total profit -0.13639474 BTC (-272.52Σ%). 
Avg duration 1691.2 min.', # noqa: E501 'total_profit': -0.13639474, 'current_epoch': 3, @@ -2718,14 +2718,14 @@ def saved_hyperopt_results(): 'loss': 100000, 'params_dict': {'mfi-value': 13, 'fastd-value': 35, 'adx-value': 39, 'rsi-value': 29, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 54, 'sell-adx-value': 63, 'sell-rsi-value': 93, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1402, 'roi_t2': 676, 'roi_t3': 215, 'roi_p1': 0.06264755784937427, 'roi_p2': 0.14258587851894644, 'roi_p3': 0.20671291201040828, 'stoploss': -0.11818343570194478}, # noqa: E501 'params_details': {'buy': {'mfi-value': 13, 'fastd-value': 35, 'adx-value': 39, 'rsi-value': 29, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 54, 'sell-adx-value': 63, 'sell-rsi-value': 93, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.411946348378729, 215: 0.2052334363683207, 891: 0.06264755784937427, 2293: 0}, 'stoploss': {'stoploss': -0.11818343570194478}}, # noqa: E501 - 'results_metrics': {'total_trades': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit': 0.0, 'holding_avg': timedelta()}, # noqa: E501 + 'results_metrics': {'total_trades': 0, 'trade_count_long': 0, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit': 0.0, 'holding_avg': timedelta()}, # noqa: E501 'results_explanation': ' 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). 
Avg duration nan min.', # noqa: E501 'total_profit': 0, 'current_epoch': 4, 'is_initial_point': True, 'is_random': False, 'is_best': False # noqa: E501 }, { 'loss': 0.22195522184191518, 'params_dict': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 1269, 'roi_t2': 601, 'roi_t3': 444, 'roi_p1': 0.07280999507931168, 'roi_p2': 0.08946698095898986, 'roi_p3': 0.1454876733325284, 'stoploss': -0.18181041180901014}, # noqa: E501 'params_details': {'buy': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3077646493708299, 444: 0.16227697603830155, 1045: 0.07280999507931168, 2314: 0}, 'stoploss': {'stoploss': -0.18181041180901014}}, # noqa: E501 - 'results_metrics': {'total_trades': 14, 'wins': 6, 'draws': 0, 'losses': 8, 'profit_mean': -0.003539515, 'profit_median': -0.012222, 'profit_total': -0.002480140000000001, 'profit_total_abs': -4.955321, 'max_drawdown': 0.34, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=3402.8571428571427)}, # noqa: E501 + 'results_metrics': {'total_trades': 14, 'trade_count_long': 14, 'trade_count_short': 0, 'wins': 6, 'draws': 0, 'losses': 8, 'profit_mean': -0.003539515, 'profit_median': -0.012222, 'profit_total': -0.002480140000000001, 'profit_total_abs': -4.955321, 'max_drawdown': 0.34, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=3402.8571428571427)}, # noqa: E501 'results_explanation': ' 14 trades. Avg profit -0.35%. Total profit -0.00248014 BTC ( -4.96Σ%). 
Avg duration 3402.9 min.', # noqa: E501 'total_profit': -0.002480140000000001, 'current_epoch': 5, @@ -2736,7 +2736,7 @@ def saved_hyperopt_results(): 'loss': 0.545315889154162, 'params_dict': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower', 'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 319, 'roi_t2': 556, 'roi_t3': 216, 'roi_p1': 0.06251955472249589, 'roi_p2': 0.11659519602202795, 'roi_p3': 0.0953744132197762, 'stoploss': -0.024551752215582423}, # noqa: E501 'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.2744891639643, 216: 0.17911475074452382, 772: 0.06251955472249589, 1091: 0}, 'stoploss': {'stoploss': -0.024551752215582423}}, # noqa: E501 - 'results_metrics': {'total_trades': 39, 'wins': 20, 'draws': 0, 'losses': 19, 'profit_mean': -0.0021400679487179478, 'profit_median': -0.012222, 'profit_total': -0.0041773, 'profit_total_abs': -8.346264999999997, 'max_drawdown': 0.45, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=636.9230769230769)}, # noqa: E501 + 'results_metrics': {'total_trades': 39, 'trade_count_long': 39, 'trade_count_short': 0, 'wins': 20, 'draws': 0, 'losses': 19, 'profit_mean': -0.0021400679487179478, 'profit_median': -0.012222, 'profit_total': -0.0041773, 'profit_total_abs': -8.346264999999997, 'max_drawdown': 0.45, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=636.9230769230769)}, # noqa: E501 'results_explanation': ' 39 trades. Avg profit -0.21%. Total profit -0.00417730 BTC ( -8.35Σ%). 
Avg duration 636.9 min.', # noqa: E501 'total_profit': -0.0041773, 'current_epoch': 6, @@ -2749,7 +2749,7 @@ def saved_hyperopt_results(): 'params_details': { 'buy': {'mfi-value': 13, 'fastd-value': 41, 'adx-value': 21, 'rsi-value': 29, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 99, 'sell-fastd-value': 60, 'sell-adx-value': 81, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.4837436938134452, 145: 0.10853310701097472, 765: 0.0586919200378493, 1536: 0}, # noqa: E501 'stoploss': {'stoploss': -0.14613268022709905}}, # noqa: E501 - 'results_metrics': {'total_trades': 318, 'wins': 100, 'draws': 0, 'losses': 218, 'profit_mean': -0.0039833954716981146, 'profit_median': -0.012222, 'profit_total': -0.06339929, 'profit_total_abs': -126.67197600000004, 'max_drawdown': 0.50, 'max_drawdown_abs': -200.955321, 'holding_avg': timedelta(minutes=3140.377358490566)}, # noqa: E501 + 'results_metrics': {'total_trades': 318, 'trade_count_long': 318, 'trade_count_short': 0, 'wins': 100, 'draws': 0, 'losses': 218, 'profit_mean': -0.0039833954716981146, 'profit_median': -0.012222, 'profit_total': -0.06339929, 'profit_total_abs': -126.67197600000004, 'max_drawdown': 0.50, 'max_drawdown_abs': -200.955321, 'holding_avg': timedelta(minutes=3140.377358490566)}, # noqa: E501 'results_explanation': ' 318 trades. Avg profit -0.40%. Total profit -0.06339929 BTC (-126.67Σ%). Avg duration 3140.4 min.', # noqa: E501 'total_profit': -0.06339929, 'current_epoch': 7, @@ -2760,7 +2760,7 @@ def saved_hyperopt_results(): 'loss': 20.0, # noqa: E501 'params_dict': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal', 'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 1149, 'roi_t2': 375, 'roi_t3': 289, 'roi_p1': 0.05571820757172588, 'roi_p2': 0.0606240398618907, 'roi_p3': 0.1729012220156157, 'stoploss': -0.1588514289110401}, # noqa: E501 'params_details': {'buy': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.2892434694492323, 289: 0.11634224743361658, 664: 0.05571820757172588, 1813: 0}, 'stoploss': {'stoploss': -0.1588514289110401}}, # noqa: E501 - 'results_metrics': {'total_trades': 1, 'wins': 0, 'draws': 1, 'losses': 0, 'profit_mean': 0.0, 'profit_median': 0.0, 'profit_total': 0.0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.52, 'holding_avg': timedelta(minutes=5340.0)}, # noqa: E501 + 'results_metrics': {'total_trades': 1, 'trade_count_long': 1, 'trade_count_short': 0, 'wins': 0, 'draws': 1, 'losses': 0, 'profit_mean': 0.0, 'profit_median': 0.0, 'profit_total': 0.0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.52, 'holding_avg': 
timedelta(minutes=5340.0)}, # noqa: E501 'results_explanation': ' 1 trades. Avg profit 0.00%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration 5340.0 min.', # noqa: E501 'total_profit': 0.0, 'current_epoch': 8, @@ -2771,7 +2771,7 @@ def saved_hyperopt_results(): 'loss': 2.4731817780991223, 'params_dict': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1012, 'roi_t2': 584, 'roi_t3': 422, 'roi_p1': 0.036764323603472565, 'roi_p2': 0.10335480573205287, 'roi_p3': 0.10322347377503042, 'stoploss': -0.2780610808108503}, # noqa: E501 'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.2433426031105559, 422: 0.14011912933552545, 1006: 0.036764323603472565, 2018: 0}, 'stoploss': {'stoploss': -0.2780610808108503}}, # noqa: E501 - 'results_metrics': {'total_trades': 229, 'wins': 150, 'draws': 0, 'losses': 79, 'profit_mean': -0.0038433433624454144, 'profit_median': -0.012222, 'profit_total': -0.044050070000000004, 'profit_total_abs': -88.01256299999999, 'max_drawdown': 0.41, 'max_drawdown_abs': -150.955321, 'holding_avg': timedelta(minutes=6505.676855895196)}, # noqa: E501 + 'results_metrics': {'total_trades': 229, 'trade_count_long': 229, 'trade_count_short': 0, 'wins': 150, 'draws': 0, 'losses': 79, 'profit_mean': -0.0038433433624454144, 'profit_median': -0.012222, 'profit_total': -0.044050070000000004, 'profit_total_abs': -88.01256299999999, 'max_drawdown': 0.41, 'max_drawdown_abs': -150.955321, 'holding_avg': timedelta(minutes=6505.676855895196)}, # noqa: E501 'results_explanation': ' 229 trades. Avg profit -0.38%. Total profit -0.04405007 BTC ( -88.01Σ%). 
Avg duration 6505.7 min.', # noqa: E501 'total_profit': -0.044050070000000004, # noqa: E501 'current_epoch': 9, @@ -2782,7 +2782,7 @@ def saved_hyperopt_results(): 'loss': -0.2604606005845212, # noqa: E501 'params_dict': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 792, 'roi_t2': 464, 'roi_t3': 215, 'roi_p1': 0.04594053535385903, 'roi_p2': 0.09623192684243963, 'roi_p3': 0.04428219070850663, 'stoploss': -0.16992287161634415}, # noqa: E501 'params_details': {'buy': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.18645465290480528, 215: 0.14217246219629864, 679: 0.04594053535385903, 1471: 0}, 'stoploss': {'stoploss': -0.16992287161634415}}, # noqa: E501 - 'results_metrics': {'total_trades': 4, 'wins': 0, 'draws': 0, 'losses': 4, 'profit_mean': 0.001080385, 'profit_median': -0.012222, 'profit_total': 0.00021629, 'profit_total_abs': 0.432154, 'max_drawdown': 0.13, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=2850.0)}, # noqa: E501 + 'results_metrics': {'total_trades': 4, 'trade_count_long': 4, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 4, 'profit_mean': 0.001080385, 'profit_median': -0.012222, 'profit_total': 0.00021629, 'profit_total_abs': 0.432154, 'max_drawdown': 0.13, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=2850.0)}, # noqa: E501 'results_explanation': ' 4 trades. Avg profit 0.11%. Total profit 0.00021629 BTC ( 0.43Σ%). 
Avg duration 2850.0 min.', # noqa: E501 'total_profit': 0.00021629, 'current_epoch': 10, @@ -2794,7 +2794,7 @@ def saved_hyperopt_results(): 'params_dict': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower', 'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 579, 'roi_t2': 614, 'roi_t3': 273, 'roi_p1': 0.05307643172744114, 'roi_p2': 0.1352282078262871, 'roi_p3': 0.1913307406325751, 'stoploss': -0.25728526022513887}, # noqa: E501 'params_details': {'buy': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3796353801863034, 273: 0.18830463955372825, 887: 0.05307643172744114, 1466: 0}, 'stoploss': {'stoploss': -0.25728526022513887}}, # noqa: E501 # New Hyperopt mode! - 'results_metrics': {'total_trades': 117, 'wins': 67, 'draws': 0, 'losses': 50, 'profit_mean': -0.012698609145299145, 'profit_median': -0.012222, 'profit_total': -0.07436117, 'profit_total_abs': -148.573727, 'max_drawdown': 0.52, 'max_drawdown_abs': -224.955321, 'holding_avg': timedelta(minutes=4282.5641025641025)}, # noqa: E501 + 'results_metrics': {'total_trades': 117, 'trade_count_long': 117, 'trade_count_short': 0, 'wins': 67, 'draws': 0, 'losses': 50, 'profit_mean': -0.012698609145299145, 'profit_median': -0.012222, 'profit_total': -0.07436117, 'profit_total_abs': -148.573727, 'max_drawdown': 0.52, 'max_drawdown_abs': -224.955321, 'holding_avg': timedelta(minutes=4282.5641025641025)}, # noqa: E501 'results_explanation': ' 117 trades. Avg profit -1.27%. Total profit -0.07436117 BTC (-148.57Σ%). 
Avg duration 4282.6 min.', # noqa: E501 'total_profit': -0.07436117, 'current_epoch': 11, @@ -2805,7 +2805,7 @@ def saved_hyperopt_results(): 'loss': 100000, 'params_dict': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1156, 'roi_t2': 581, 'roi_t3': 408, 'roi_p1': 0.06860454019988212, 'roi_p2': 0.12473718444931989, 'roi_p3': 0.2896360635226823, 'stoploss': -0.30889015124682806}, # noqa: E501 'params_details': {'buy': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4829777881718843, 408: 0.19334172464920202, 989: 0.06860454019988212, 2145: 0}, 'stoploss': {'stoploss': -0.30889015124682806}}, # noqa: E501 - 'results_metrics': {'total_trades': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.0, 'holding_avg': timedelta()}, # noqa: E501 + 'results_metrics': {'total_trades': 0, 'trade_count_long': 0, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.0, 'holding_avg': timedelta()}, # noqa: E501 'results_explanation': ' 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). 
Avg duration nan min.', # noqa: E501 'total_profit': 0, 'current_epoch': 12, From 756921b16a060e06aebc1d9ad49d242c1f6fe97b Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 Nov 2022 17:05:49 +0100 Subject: [PATCH 182/232] Update fthypt file --- tests/testdata/strategy_SampleStrategy.fthypt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/testdata/strategy_SampleStrategy.fthypt b/tests/testdata/strategy_SampleStrategy.fthypt index 6a64a9c4f..e71102043 100644 --- a/tests/testdata/strategy_SampleStrategy.fthypt +++ b/tests/testdata/strategy_SampleStrategy.fthypt @@ -1,5 +1,5 @@ -{"loss":100000,"params_dict":{"mfi-value":"20","fastd-value":"21","adx-value":"26","rsi-value":"23","mfi-enabled":true,"fastd-enabled":false,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal","sell-mfi-value":"97","sell-fastd-value":"85","sell-adx-value":"55","sell-rsi-value":"76","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper","roi_t1":"34","roi_t2":"28","roi_t3":"32","roi_p1":0.031,"roi_p2":0.033,"roi_p3":0.146,"stoploss":-0.05},"params_details":{"buy":{"mfi-value":"20","fastd-value":"21","adx-value":"26","rsi-value":"23","mfi-enabled":true,"fastd-enabled":false,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal"},"sell":{"sell-mfi-value":"97","sell-fastd-value":"85","sell-adx-value":"55","sell-rsi-value":"76","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper"},"roi":"{0: 0.21, 32: 0.064, 60: 0.031, 94: 0}","stoploss":{"stoploss":-0.05}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[],"locks":[],"best_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"worst_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"results_per_pair":[{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ETC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"TRX/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ADA/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key
":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"sell_reason_summary":[],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":0,"total_volume":0.0,"avg_stake_amount":0,"profit_mean":0,"profit_median":0,"profit_total":0.0,"profit_total_abs":0,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793107,"trades_per_day":0.0,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0,"backtest_worst_day":0,"backtest_best_day_abs":0,"backtest_worst_day_abs":0,"winning_days":0,"draw_days":0,"losing_days":0,"wins":0,"losses":0,"draws":0,"holding_avg":"0:00:00","winner_holding_avg":"0:00:00","loser_holding_avg":"0:00:00","max_drawdown":0.0,"max_drawdown_abs":0.0,"max_drawdown_low":0.0,"max_drawdown_high":0.0,"drawdown_start":"1970-01-01 00:00:00+00:00","drawdown_start_ts":0,"drawdown_end":"1970-01-01 00:00:00+00:00","drawdown_end_ts":0,"csum_min":0,"csum_max":0},"results_explanation":" 0 trades. 0/0/0 Wins/Draws/Losses. Avg profit 0.00%. Median profit 0.00%. Total profit 0.00000000 BTC ( 0.00\u03A3%). 
Avg duration 0:00:00 min.","total_profit":0.0,"current_epoch":1,"is_initial_point":true,"is_best":false} -{"loss":100000,"params_dict":{"mfi-value":"14","fastd-value":"43","adx-value":"30","rsi-value":"24","mfi-enabled":true,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal","sell-mfi-value":"97","sell-fastd-value":"71","sell-adx-value":"82","sell-rsi-value":"99","sell-mfi-enabled":false,"sell-fastd-enabled":false,"sell-adx-enabled":false,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper","roi_t1":"84","roi_t2":"35","roi_t3":"19","roi_p1":0.024,"roi_p2":0.022,"roi_p3":0.061,"stoploss":-0.083},"params_details":{"buy":{"mfi-value":"14","fastd-value":"43","adx-value":"30","rsi-value":"24","mfi-enabled":true,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal"},"sell":{"sell-mfi-value":"97","sell-fastd-value":"71","sell-adx-value":"82","sell-rsi-value":"99","sell-mfi-enabled":false,"sell-fastd-enabled":false,"sell-adx-enabled":false,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper"},"roi":"{0: 0.107, 19: 0.046, 54: 0.024, 138: 0}","stoploss":{"stoploss":-0.083}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[],"locks":[],"best_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"worst_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"results_per_pair":[{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ETC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"TRX/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ADA/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"sell_reason_summary":[],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":0,"total_volume"
:0.0,"avg_stake_amount":0,"profit_mean":0,"profit_median":0,"profit_total":0.0,"profit_total_abs":0,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":0.0,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0,"backtest_worst_day":0,"backtest_best_day_abs":0,"backtest_worst_day_abs":0,"winning_days":0,"draw_days":0,"losing_days":0,"wins":0,"losses":0,"draws":0,"holding_avg":"0:00:00","winner_holding_avg":"0:00:00","loser_holding_avg":"0:00:00","max_drawdown":0.0,"max_drawdown_abs":0.0,"max_drawdown_low":0.0,"max_drawdown_high":0.0,"drawdown_start":"1970-01-01 00:00:00+00:00","drawdown_start_ts":0,"drawdown_end":"1970-01-01 00:00:00+00:00","drawdown_end_ts":0,"csum_min":0,"csum_max":0},"results_explanation":" 0 trades. 0/0/0 Wins/Draws/Losses. Avg profit 0.00%. Median profit 0.00%. Total profit 0.00000000 BTC ( 0.00\u03A3%). Avg duration 0:00:00 min.","total_profit":0.0,"current_epoch":2,"is_initial_point":true,"is_best":false} -{"loss":2.183447401951895,"params_dict":{"mfi-value":"14","fastd-value":"15","adx-value":"40","rsi-value":"36","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":false,"trigger":"sar_reversal","sell-mfi-value":"92","sell-fastd-value":"84","sell-adx-value":"61","sell-rsi-value":"61","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper","roi_t1":"68","roi_t2":"41","roi_t3":"21","roi_p1":0.015,"roi_p2":0.064,"roi_p3":0.126,"stoploss":-0.024},"params_details":{"buy":{"mfi-value":"14","fastd-value":"15","adx-value":"40","rsi-value":"36","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":false,"trigger":"sar_reversal"},"sell":{"sell-mfi-value":"92","sell-fastd-value":"84","sell-adx-value":"61","sell-rsi-value":"61","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper"},"roi":"{0: 0.20500000000000002, 21: 0.079, 62: 0.015, 130: 0}","stoploss":{"stoploss":-0.024}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.94115571,"open_date":"2018-01-11 11:40:00+00:00","close_date":"2018-01-11 
19:40:00+00:00","open_rate":0.01700012,"close_rate":0.017119538805820372,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":480,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.01659211712,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.01659211712,"stop_loss_ratio":-0.024,"min_rate":0.01689809,"max_rate":0.0171462,"is_open":false,"open_timestamp":1515670800000.0,"close_timestamp":1515699600000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.57407318,"open_date":"2018-01-12 11:05:00+00:00","close_date":"2018-01-12 12:30:00+00:00","open_rate":0.08709691,"close_rate":0.08901977203712995,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":85,"profit_ratio":0.01494768,"profit_abs":0.00075,"sell_reason":"roi","initial_stop_loss_abs":0.08500658416,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.08500658416,"stop_loss_ratio":-0.024,"min_rate":0.08702974000000001,"max_rate":0.08929248000000001,"is_open":false,"open_timestamp":1515755100000.0,"close_timestamp":1515760200000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.93166236,"open_date":"2018-01-12 03:30:00+00:00","close_date":"2018-01-12 13:05:00+00:00","open_rate":0.01705517,"close_rate":0.01717497550928249,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":575,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.016645845920000003,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016645845920000003,"stop_loss_ratio":-0.024,"min_rate":0.0169841,"max_rate":0.01719135,"is_open":false,"open_timestamp":1515727800000.0,"close_timestamp":1515762300000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.96876855,"open_date":"2018-01-13 03:50:00+00:00","close_date":"2018-01-13 06:05:00+00:00","open_rate":0.016842,"close_rate":0.016960308078273957,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":135,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.016437792,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016437792,"stop_loss_ratio":-0.024,"min_rate":0.016836999999999998,"max_rate":0.017,"is_open":false,"open_timestamp":1515815400000.0,"close_timestamp":1515823500000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.53163205,"open_date":"2018-01-13 13:25:00+00:00","close_date":"2018-01-13 15:35:00+00:00","open_rate":0.09405001,"close_rate":0.09471067238835926,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":130,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.09179280976,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.09179280976,"stop_loss_ratio":-0.024,"min_rate":0.09369894000000001,"max_rate":0.09479997,"is_open":false,"open_timestamp":1515849900000.0,"close_timestamp":1515857700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.23816853,"open_date":"2018-01-13 15:30:00+00:00","close_date":"2018-01-13 16:20:00+00:00","open_rate":0.0025989999999999997,"close_rate":0.0028232990466633217,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":50,"profit_ratio":0.07872446,"profit_abs":0.00395,"sell_reason":"roi","initial_stop_loss_abs":0.002536624,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.002536624,"stop_loss_ratio":-0.024,"min_rate":0.00259525,"max_rate":0.0028288700000000003,"is_open":false,"open_timestamp":1515857400000.0,"close_timestamp":1515860400000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":492.80504632,"open_date":"2018-01-14 21:35:00+00:00","close_date":"2018-01-14 
23:15:00+00:00","open_rate":0.00010146000000000001,"close_rate":0.00010369995985950828,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":100,"profit_ratio":0.01494768,"profit_abs":0.00075,"sell_reason":"roi","initial_stop_loss_abs":9.902496e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":9.902496e-05,"stop_loss_ratio":-0.024,"min_rate":0.0001012,"max_rate":0.00010414,"is_open":false,"open_timestamp":1515965700000.0,"close_timestamp":1515971700000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.92398174,"open_date":"2018-01-15 12:45:00+00:00","close_date":"2018-01-15 21:05:00+00:00","open_rate":0.01709997,"close_rate":0.01722009021073758,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":500,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.016689570719999998,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016689570719999998,"stop_loss_ratio":-0.024,"min_rate":0.01694,"max_rate":0.01725,"is_open":false,"open_timestamp":1516020300000.0,"close_timestamp":1516050300000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":1111.60515785,"open_date":"2018-01-15 19:50:00+00:00","close_date":"2018-01-15 23:45:00+00:00","open_rate":4.4980000000000006e-05,"close_rate":4.390048e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":235,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":4.390048e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":4.390048e-05,"stop_loss_ratio":-0.024,"min_rate":4.409e-05,"max_rate":4.502e-05,"is_open":false,"open_timestamp":1516045800000.0,"close_timestamp":1516059900000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":519.80455349,"open_date":"2018-01-21 03:55:00+00:00","close_date":"2018-01-21 04:05:00+00:00","open_rate":9.619e-05,"close_rate":9.388144e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":10,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":9.388144e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":9.388144e-05,"stop_loss_ratio":-0.024,"min_rate":9.568e-05,"max_rate":9.673e-05,"is_open":false,"open_timestamp":1516506900000.0,"close_timestamp":1516507500000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":3.029754,"open_date":"2018-01-20 22:15:00+00:00","close_date":"2018-01-21 07:45:00+00:00","open_rate":0.01650299,"close_rate":0.016106918239999997,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":570,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":0.016106918239999997,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016106918239999997,"stop_loss_ratio":-0.024,"min_rate":0.0162468,"max_rate":0.01663179,"is_open":false,"open_timestamp":1516486500000.0,"close_timestamp":1516520700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":18.75461832,"open_date":"2018-01-21 13:00:00+00:00","close_date":"2018-01-21 16:25:00+00:00","open_rate":0.00266601,"close_rate":0.00260202576,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":205,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":0.00260202576,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.00260202576,"stop_loss_ratio":-0.024,"min_rate":0.0026290800000000002,"max_rate":0.00269384,"is_open":false,"open_timestamp":1516539600000.0,"close_timestamp":1516551900000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":552.18111541,"open_date":"2018-01-22 02:10:00+00:00","close_date":"2018-01-22 
04:20:00+00:00","open_rate":9.055e-05,"close_rate":9.118607626693427e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":130,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":8.83768e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":8.83768e-05,"stop_loss_ratio":-0.024,"min_rate":9.013e-05,"max_rate":9.197e-05,"is_open":false,"open_timestamp":1516587000000.0,"close_timestamp":1516594800000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.99733237,"open_date":"2018-01-22 03:20:00+00:00","close_date":"2018-01-22 13:50:00+00:00","open_rate":0.0166815,"close_rate":0.016281143999999997,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":630,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":0.016281143999999997,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016281143999999997,"stop_loss_ratio":-0.024,"min_rate":0.01641443,"max_rate":0.016800000000000002,"is_open":false,"open_timestamp":1516591200000.0,"close_timestamp":1516629000000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":503.52467271,"open_date":"2018-01-23 08:55:00+00:00","close_date":"2018-01-23 09:40:00+00:00","open_rate":9.93e-05,"close_rate":9.69168e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":45,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":9.69168e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":9.69168e-05,"stop_loss_ratio":-0.024,"min_rate":9.754e-05,"max_rate":0.00010025,"is_open":false,"open_timestamp":1516697700000.0,"close_timestamp":1516700400000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.55148073,"open_date":"2018-01-24 02:10:00+00:00","close_date":"2018-01-24 04:40:00+00:00","open_rate":0.090665,"close_rate":0.09130188409433015,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":150,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.08848903999999999,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.08848903999999999,"stop_loss_ratio":-0.024,"min_rate":0.090665,"max_rate":0.09146000000000001,"is_open":false,"open_timestamp":1516759800000.0,"close_timestamp":1516768800000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.10584639,"open_date":"2018-01-24 19:20:00+00:00","close_date":"2018-01-24 21:35:00+00:00","open_rate":0.002617,"close_rate":0.0026353833416959357,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":135,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.002554192,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.002554192,"stop_loss_ratio":-0.024,"min_rate":0.002617,"max_rate":0.00264999,"is_open":false,"open_timestamp":1516821600000.0,"close_timestamp":1516829700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.34602691,"open_date":"2018-01-25 14:35:00+00:00","close_date":"2018-01-25 16:35:00+00:00","open_rate":0.00258451,"close_rate":0.002641568926241846,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.01494768,"profit_abs":0.00075,"sell_reason":"roi","initial_stop_loss_abs":0.00252248176,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.00252248176,"stop_loss_ratio":-0.024,"min_rate":0.00258451,"max_rate":0.00264579,"is_open":false,"open_timestamp":1516890900000.0,"close_timestamp":1516898100000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":3.11910295,"open_date":"2018-01-24 23:05:00+00:00","close_date":"2018-01-25 
16:55:00+00:00","open_rate":0.01603025,"close_rate":0.016142855870546913,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":1070,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.015645523999999997,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.015645523999999997,"stop_loss_ratio":-0.024,"min_rate":0.015798760000000002,"max_rate":0.01617,"is_open":false,"open_timestamp":1516835100000.0,"close_timestamp":1516899300000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":553.70985604,"open_date":"2018-01-26 19:30:00+00:00","close_date":"2018-01-26 23:30:00+00:00","open_rate":9.03e-05,"close_rate":9.093432012042147e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":240,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":8.813279999999999e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":8.813279999999999e-05,"stop_loss_ratio":-0.024,"min_rate":8.961e-05,"max_rate":9.1e-05,"is_open":false,"open_timestamp":1516995000000.0,"close_timestamp":1517009400000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.22929005,"open_date":"2018-01-26 21:15:00+00:00","close_date":"2018-01-28 03:50:00+00:00","open_rate":0.0026002,"close_rate":0.0026184653286502758,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":1835,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.0025377952,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.0025377952,"stop_loss_ratio":-0.024,"min_rate":0.00254702,"max_rate":0.00262797,"is_open":false,"open_timestamp":1517001300000.0,"close_timestamp":1517111400000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":3.15677093,"open_date":"2018-01-27 22:05:00+00:00","close_date":"2018-01-28 10:45:00+00:00","open_rate":0.01583897,"close_rate":0.015950232207727046,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":760,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.01545883472,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.01545883472,"stop_loss_ratio":-0.024,"min_rate":0.015700000000000002,"max_rate":0.01596521,"is_open":false,"open_timestamp":1517090700000.0,"close_timestamp":1517136300000.0}],"locks":[],"best_pair":{"key":"ETC/BTC","trades":5,"profit_mean":0.012572794000000002,"profit_mean_pct":1.2572794000000003,"profit_sum":0.06286397,"profit_sum_pct":6.29,"profit_total_abs":0.0031542000000000002,"profit_total":3.1542000000000002e-06,"profit_total_pct":0.0,"duration_avg":"7:49:00","wins":2,"draws":2,"losses":1},"worst_pair":{"key":"LTC/BTC","trades":8,"profit_mean":-0.0077020425,"profit_mean_pct":-0.77020425,"profit_sum":-0.06161634,"profit_sum_pct":-6.16,"profit_total_abs":-0.0030916,"profit_total":-3.0915999999999998e-06,"profit_total_pct":-0.0,"duration_avg":"9:50:00","wins":0,"draws":6,"losses":2},"results_per_pair":[{"key":"ETC/BTC","trades":5,"profit_mean":0.012572794000000002,"profit_mean_pct":1.2572794000000003,"profit_sum":0.06286397,"profit_sum_pct":6.29,"profit_total_abs":0.0031542000000000002,"profit_total":3.1542000000000002e-06,"profit_total_pct":0.0,"duration_avg":"7:49:00","wins":2,"draws":2,"losses":1},{"key":"ETH/BTC","trades":3,"profit_mean":0.00498256,"profit_mean_pct":0.498256,"profit_sum":0.01494768,"profit_sum_pct":1.49,"profit_total_abs":0.00075,"profit_total":7.5e-07,"profit_total_pct":0.0,"duration_avg":"2:02:00","wins":1,"draws":2,"losses":0},{"key":"ADA/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"du
ration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":1,"profit_mean":-0.03080817,"profit_mean_pct":-3.080817,"profit_sum":-0.03080817,"profit_sum_pct":-3.08,"profit_total_abs":-0.0015458,"profit_total":-1.5457999999999999e-06,"profit_total_pct":-0.0,"duration_avg":"3:55:00","wins":0,"draws":0,"losses":1},{"key":"TRX/BTC","trades":5,"profit_mean":-0.009333732,"profit_mean_pct":-0.9333732000000001,"profit_sum":-0.04666866,"profit_sum_pct":-4.67,"profit_total_abs":-0.0023416,"profit_total":-2.3416e-06,"profit_total_pct":-0.0,"duration_avg":"1:45:00","wins":1,"draws":2,"losses":2},{"key":"LTC/BTC","trades":8,"profit_mean":-0.0077020425,"profit_mean_pct":-0.77020425,"profit_sum":-0.06161634,"profit_sum_pct":-6.16,"profit_total_abs":-0.0030916,"profit_total":-3.0915999999999998e-06,"profit_total_pct":-0.0,"duration_avg":"9:50:00","wins":0,"draws":6,"losses":2},{"key":"TOTAL","trades":22,"profit_mean":-0.0027855236363636365,"profit_mean_pct":-0.27855236363636365,"profit_sum":-0.06128152,"profit_sum_pct":-6.13,"profit_total_abs":-0.0030748,"profit_total":-3.0747999999999998e-06,"profit_total_pct":-0.0,"duration_avg":"6:12:00","wins":4,"draws":12,"losses":6}],"sell_reason_summary":[{"sell_reason":"roi","trades":16,"wins":4,"draws":12,"losses":0,"profit_mean":0.00772296875,"profit_mean_pct":0.77,"profit_sum":0.1235675,"profit_sum_pct":12.36,"profit_total_abs":0.006200000000000001,"profit_total":0.041189166666666666,"profit_total_pct":4.12},{"sell_reason":"stop_loss","trades":6,"wins":0,"draws":0,"losses":6,"profit_mean":-0.03080817,"profit_mean_pct":-3.08,"profit_sum":-0.18484902,"profit_sum_pct":-18.48,"profit_total_abs":-0.0092748,"profit_total":-0.06161634,"profit_total_pct":-6.16}],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":22,"total_volume":1.1000000000000003,"avg_stake_amount":0.05000000000000002,"profit_mean":-0.0027855236363636365,"profit_median":0.0,"profit_total":-3.0747999999999998e-06,"profit_total_abs":-0.0030748,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":1.16,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":999.9969252,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.07872446,"backtest_worst_day":-0.09242451,"backtest_best_day_abs":0.00395,"backtest_worst_day_abs":-0.0046374,"winning_days":4,"draw_days":10,"losing_days":4,"wins":4,"losses":6,"draws":12,"holding_avg":"6:12:00","winner_holding_avg":"1:29:00","loser_holding_avg":"4:42:00","max_drawdown":0.18484901999999998,"max_drawdown_abs":0.0092748,"drawdown_start":"2018-01-14 
23:15:00","drawdown_start_ts":1515971700000.0,"drawdown_end":"2018-01-23 09:40:00","drawdown_end_ts":1516700400000.0,"max_drawdown_low":-0.0038247999999999997,"max_drawdown_high":0.00545,"csum_min":999.9961752,"csum_max":1000.00545},"results_explanation":" 22 trades. 4/12/6 Wins/Draws/Losses. Avg profit -0.28%. Median profit 0.00%. Total profit -0.00307480 BTC ( -0.00\u03A3%). Avg duration 6:12:00 min.","total_profit":-3.0747999999999998e-06,"current_epoch":3,"is_initial_point":true,"is_best":true} -{"loss":-4.9544427978437175,"params_dict":{"mfi-value":"23","fastd-value":"40","adx-value":"50","rsi-value":"27","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":true,"rsi-enabled":true,"trigger":"bb_lower","sell-mfi-value":"87","sell-fastd-value":"60","sell-adx-value":"81","sell-rsi-value":"69","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":false,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal","roi_t1":"105","roi_t2":"43","roi_t3":"12","roi_p1":0.03,"roi_p2":0.036,"roi_p3":0.103,"stoploss":-0.081},"params_details":{"buy":{"mfi-value":"23","fastd-value":"40","adx-value":"50","rsi-value":"27","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":true,"rsi-enabled":true,"trigger":"bb_lower"},"sell":{"sell-mfi-value":"87","sell-fastd-value":"60","sell-adx-value":"81","sell-rsi-value":"69","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":false,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal"},"roi":"{0: 0.16899999999999998, 12: 0.066, 55: 0.03, 160: 0}","stoploss":{"stoploss":-0.081}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[{"pair":"XLM/BTC","stake_amount":0.05,"amount":1086.95652174,"open_date":"2018-01-13 13:30:00+00:00","close_date":"2018-01-13 16:30:00+00:00","open_rate":4.6e-05,"close_rate":4.632313095835424e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":180,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":4.2274e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.2274e-05,"stop_loss_ratio":-0.081,"min_rate":4.4980000000000006e-05,"max_rate":4.673e-05,"is_open":false,"open_timestamp":1515850200000.0,"close_timestamp":1515861000000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":851.35365231,"open_date":"2018-01-15 14:50:00+00:00","close_date":"2018-01-15 16:15:00+00:00","open_rate":5.873000000000001e-05,"close_rate":6.0910642247867544e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":85,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":5.397287000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":5.397287000000001e-05,"stop_loss_ratio":-0.081,"min_rate":5.873000000000001e-05,"max_rate":6.120000000000001e-05,"is_open":false,"open_timestamp":1516027800000.0,"close_timestamp":1516032900000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":896.86098655,"open_date":"2018-01-16 00:35:00+00:00","close_date":"2018-01-16 03:15:00+00:00","open_rate":5.575000000000001e-05,"close_rate":5.6960000000000004e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":160,"profit_ratio":0.01457705,"profit_abs":0.0007314,"sell_reason":"roi","initial_stop_loss_abs":5.123425000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":5.123425000000001e-05,"stop_loss_ratio":-0.081,"min_rate":5.575000000000001e-05,"max_rate":5.730000000000001e-05,"is_open":false,"open_timestamp":1516062900000.0,"close_timestamp":1516072500000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":747.160789,"open_date":"2018-01-16 
22:30:00+00:00","close_date":"2018-01-16 22:45:00+00:00","open_rate":6.692e-05,"close_rate":7.182231811339689e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":15,"profit_ratio":0.06576981,"profit_abs":0.0033,"sell_reason":"roi","initial_stop_loss_abs":6.149948000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":6.149948000000001e-05,"stop_loss_ratio":-0.081,"min_rate":6.692e-05,"max_rate":7.566e-05,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142700000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":720.5649229,"open_date":"2018-01-17 15:15:00+00:00","close_date":"2018-01-17 16:40:00+00:00","open_rate":6.939000000000001e-05,"close_rate":7.19664475664827e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":85,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":6.376941000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":6.376941000000001e-05,"stop_loss_ratio":-0.081,"min_rate":6.758e-05,"max_rate":7.244e-05,"is_open":false,"open_timestamp":1516202100000.0,"close_timestamp":1516207200000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":1144.42664225,"open_date":"2018-01-18 22:20:00+00:00","close_date":"2018-01-19 00:35:00+00:00","open_rate":4.3690000000000004e-05,"close_rate":4.531220772704466e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":135,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":4.015111e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.015111e-05,"stop_loss_ratio":-0.081,"min_rate":4.3690000000000004e-05,"max_rate":4.779e-05,"is_open":false,"open_timestamp":1516314000000.0,"close_timestamp":1516322100000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":876.57784011,"open_date":"2018-01-18 22:25:00+00:00","close_date":"2018-01-19 01:05:00+00:00","open_rate":5.704e-05,"close_rate":5.792e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":160,"profit_ratio":0.00834457,"profit_abs":0.00041869,"sell_reason":"roi","initial_stop_loss_abs":5.2419760000000006e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":5.2419760000000006e-05,"stop_loss_ratio":-0.081,"min_rate":5.704e-05,"max_rate":5.8670000000000006e-05,"is_open":false,"open_timestamp":1516314300000.0,"close_timestamp":1516323900000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":525.59655209,"open_date":"2018-01-20 05:05:00+00:00","close_date":"2018-01-20 06:25:00+00:00","open_rate":9.513e-05,"close_rate":9.86621726041144e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":80,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":8.742447000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":8.742447000000001e-05,"stop_loss_ratio":-0.081,"min_rate":9.513e-05,"max_rate":9.95e-05,"is_open":false,"open_timestamp":1516424700000.0,"close_timestamp":1516429500000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":920.64076597,"open_date":"2018-01-26 07:40:00+00:00","close_date":"2018-01-26 
10:20:00+00:00","open_rate":5.431000000000001e-05,"close_rate":5.474000000000001e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":160,"profit_ratio":0.0008867,"profit_abs":4.449e-05,"sell_reason":"roi","initial_stop_loss_abs":4.991089000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.991089000000001e-05,"stop_loss_ratio":-0.081,"min_rate":5.3670000000000006e-05,"max_rate":5.5e-05,"is_open":false,"open_timestamp":1516952400000.0,"close_timestamp":1516962000000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":944.28706327,"open_date":"2018-01-28 04:35:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":5.2950000000000006e-05,"close_rate":4.995000000000001e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":2890,"profit_ratio":-0.06323759,"profit_abs":-0.00317295,"sell_reason":"force_sell","initial_stop_loss_abs":4.866105000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.866105000000001e-05,"stop_loss_ratio":-0.081,"min_rate":4.980000000000001e-05,"max_rate":5.3280000000000005e-05,"is_open":true,"open_timestamp":1517114100000.0,"close_timestamp":1517287500000.0}],"locks":[],"best_pair":{"key":"TRX/BTC","trades":3,"profit_mean":0.04185351666666667,"profit_mean_pct":4.185351666666667,"profit_sum":0.12556055,"profit_sum_pct":12.56,"profit_total_abs":0.0063,"profit_total":6.3e-06,"profit_total_pct":0.0,"duration_avg":"1:00:00","wins":3,"draws":0,"losses":0},"worst_pair":{"key":"XLM/BTC","trades":3,"profit_mean":-0.01111407333333333,"profit_mean_pct":-1.111407333333333,"profit_sum":-0.03334221999999999,"profit_sum_pct":-3.33,"profit_total_abs":-0.0016729499999999999,"profit_total":-1.6729499999999998e-06,"profit_total_pct":-0.0,"duration_avg":"17:48:00","wins":1,"draws":1,"losses":1},"results_per_pair":[{"key":"TRX/BTC","trades":3,"profit_mean":0.04185351666666667,"profit_mean_pct":4.185351666666667,"profit_sum":0.12556055,"profit_sum_pct":12.56,"profit_total_abs":0.0063,"profit_total":6.3e-06,"profit_total_pct":0.0,"duration_avg":"1:00:00","wins":3,"draws":0,"losses":0},{"key":"ADA/BTC","trades":4,"profit_mean":0.0134259225,"profit_mean_pct":1.34259225,"profit_sum":0.05370369,"profit_sum_pct":5.37,"profit_total_abs":0.00269458,"profit_total":2.69458e-06,"profit_total_pct":0.0,"duration_avg":"2:21:00","wins":4,"draws":0,"losses":0},{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ETC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":3,"profit_mean":-0.01111407333333333,"profit_mean_pct":-1.111407333333333,"profit_sum":-0.03334221999999999,"profit_sum_pct":-3.33,"profit_total_abs":-0.0016729499999999999,"profit_total":-1.6729499999999998e-06,"profit_total_pct":-0.0,"duration_avg":"17:48:00","wins":1,"draws":1,"losses":1},{"key":"TOTAL","trades":10,"profit_mean":0.014592201999999999,"profit_mean_pct":1.4592201999999999,"profit_sum":0.14592201999999999,"profit_sum_pct":14.59,"profit_total_abs":0.00732163,"profit_total":7.32163e-06,"profit_total_pct":0.0,"duration_
avg":"6:35:00","wins":8,"draws":1,"losses":1}],"sell_reason_summary":[{"sell_reason":"roi","trades":9,"wins":8,"draws":1,"losses":0,"profit_mean":0.023239956666666665,"profit_mean_pct":2.32,"profit_sum":0.20915961,"profit_sum_pct":20.92,"profit_total_abs":0.01049458,"profit_total":0.06971987,"profit_total_pct":6.97},{"sell_reason":"force_sell","trades":1,"wins":0,"draws":0,"losses":1,"profit_mean":-0.06323759,"profit_mean_pct":-6.32,"profit_sum":-0.06323759,"profit_sum_pct":-6.32,"profit_total_abs":-0.00317295,"profit_total":-0.021079196666666664,"profit_total_pct":-2.11}],"left_open_trades":[{"key":"XLM/BTC","trades":1,"profit_mean":-0.06323759,"profit_mean_pct":-6.323759,"profit_sum":-0.06323759,"profit_sum_pct":-6.32,"profit_total_abs":-0.00317295,"profit_total":-3.17295e-06,"profit_total_pct":-0.0,"duration_avg":"2 days, 0:10:00","wins":0,"draws":0,"losses":1},{"key":"TOTAL","trades":1,"profit_mean":-0.06323759,"profit_mean_pct":-6.323759,"profit_sum":-0.06323759,"profit_sum_pct":-6.32,"profit_total_abs":-0.00317295,"profit_total":-3.17295e-06,"profit_total_pct":-0.0,"duration_avg":"2 days, 0:10:00","wins":0,"draws":0,"losses":1}],"total_trades":10,"total_volume":0.5,"avg_stake_amount":0.05,"profit_mean":0.014592201999999999,"profit_median":0.02223621,"profit_total":7.32163e-06,"profit_total_abs":0.00732163,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":0.53,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000.00732163,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.08034685999999999,"backtest_worst_day":-0.06323759,"backtest_best_day_abs":0.0040314,"backtest_worst_day_abs":-0.00317295,"winning_days":6,"draw_days":11,"losing_days":1,"wins":8,"losses":1,"draws":1,"holding_avg":"6:35:00","winner_holding_avg":"1:50:00","loser_holding_avg":"2 days, 0:10:00","max_drawdown":0.06323759000000001,"max_drawdown_abs":0.00317295,"drawdown_start":"2018-01-26 10:20:00","drawdown_start_ts":1516962000000.0,"drawdown_end":"2018-01-30 04:45:00","drawdown_end_ts":1517287500000.0,"max_drawdown_low":0.007321629999999998,"max_drawdown_high":0.010494579999999998,"csum_min":1000.0,"csum_max":1000.01049458},"results_explanation":" 10 trades. 8/1/1 Wins/Draws/Losses. Avg profit 1.46%. Median profit 2.22%. Total profit 0.00732163 BTC ( 0.00\u03A3%). 
Avg duration 6:35:00 min.","total_profit":7.32163e-06,"current_epoch":4,"is_initial_point":true,"is_best":true} -{"loss":0.16709185414267655,"params_dict":{"mfi-value":"10","fastd-value":"45","adx-value":"28","rsi-value":"37","mfi-enabled":false,"fastd-enabled":false,"adx-enabled":true,"rsi-enabled":true,"trigger":"macd_cross_signal","sell-mfi-value":"85","sell-fastd-value":"56","sell-adx-value":"98","sell-rsi-value":"89","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal","roi_t1":"85","roi_t2":"11","roi_t3":"24","roi_p1":0.04,"roi_p2":0.043,"roi_p3":0.053,"stoploss":-0.057},"params_details":{"buy":{"mfi-value":"10","fastd-value":"45","adx-value":"28","rsi-value":"37","mfi-enabled":false,"fastd-enabled":false,"adx-enabled":true,"rsi-enabled":true,"trigger":"macd_cross_signal"},"sell":{"sell-mfi-value":"85","sell-fastd-value":"56","sell-adx-value":"98","sell-rsi-value":"89","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal"},"roi":"{0: 0.13599999999999998, 24: 0.08299999999999999, 35: 0.04, 120: 0}","stoploss":{"stoploss":-0.057}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.56173464,"open_date":"2018-01-10 19:15:00+00:00","close_date":"2018-01-10 21:15:00+00:00","open_rate":0.08901,"close_rate":0.09112999000000001,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.01667571,"profit_abs":0.0008367,"sell_reason":"roi","initial_stop_loss_abs":0.08393643,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.08393643,"stop_loss_ratio":-0.057,"min_rate":0.08894498,"max_rate":0.09116998,"is_open":false,"open_timestamp":1515611700000.0,"close_timestamp":1515618900000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":794.65988557,"open_date":"2018-01-13 11:30:00+00:00","close_date":"2018-01-13 15:10:00+00:00","open_rate":6.292e-05,"close_rate":5.9333559999999994e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":220,"profit_ratio":-0.06357798,"profit_abs":-0.00319003,"sell_reason":"stop_loss","initial_stop_loss_abs":5.9333559999999994e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":5.9333559999999994e-05,"stop_loss_ratio":-0.057,"min_rate":5.9900000000000006e-05,"max_rate":6.353e-05,"is_open":false,"open_timestamp":1515843000000.0,"close_timestamp":1515856200000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":1086.95652174,"open_date":"2018-01-13 14:35:00+00:00","close_date":"2018-01-13 21:40:00+00:00","open_rate":4.6e-05,"close_rate":4.632313095835424e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":425,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":4.3378e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":4.3378e-05,"stop_loss_ratio":-0.057,"min_rate":4.4980000000000006e-05,"max_rate":4.6540000000000005e-05,"is_open":false,"open_timestamp":1515854100000.0,"close_timestamp":1515879600000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.53757603,"open_date":"2018-01-15 13:15:00+00:00","close_date":"2018-01-15 
15:15:00+00:00","open_rate":0.0930101,"close_rate":0.09366345745107878,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.0877085243,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.0877085243,"stop_loss_ratio":-0.057,"min_rate":0.09188489999999999,"max_rate":0.09380000000000001,"is_open":false,"open_timestamp":1516022100000.0,"close_timestamp":1516029300000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":17.07469496,"open_date":"2018-01-15 14:35:00+00:00","close_date":"2018-01-15 16:35:00+00:00","open_rate":0.00292831,"close_rate":0.00297503,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.00886772,"profit_abs":0.00044494,"sell_reason":"roi","initial_stop_loss_abs":0.0027613963299999997,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.0027613963299999997,"stop_loss_ratio":-0.057,"min_rate":0.00292831,"max_rate":0.00301259,"is_open":false,"open_timestamp":1516026900000.0,"close_timestamp":1516034100000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":702.44450688,"open_date":"2018-01-17 04:25:00+00:00","close_date":"2018-01-17 05:00:00+00:00","open_rate":7.118e-05,"close_rate":7.453721023582538e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":35,"profit_ratio":0.03986049,"profit_abs":0.002,"sell_reason":"roi","initial_stop_loss_abs":6.712274e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":6.712274e-05,"stop_loss_ratio":-0.057,"min_rate":7.118e-05,"max_rate":7.658000000000002e-05,"is_open":false,"open_timestamp":1516163100000.0,"close_timestamp":1516165200000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":18.86756854,"open_date":"2018-01-20 06:05:00+00:00","close_date":"2018-01-20 08:05:00+00:00","open_rate":0.00265005,"close_rate":0.00266995,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.00048133,"profit_abs":2.415e-05,"sell_reason":"roi","initial_stop_loss_abs":0.00249899715,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.00249899715,"stop_loss_ratio":-0.057,"min_rate":0.00265005,"max_rate":0.00271,"is_open":false,"open_timestamp":1516428300000.0,"close_timestamp":1516435500000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":966.18357488,"open_date":"2018-01-22 03:25:00+00:00","close_date":"2018-01-22 07:05:00+00:00","open_rate":5.1750000000000004e-05,"close_rate":5.211352232814853e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":220,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":4.8800250000000004e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":4.8800250000000004e-05,"stop_loss_ratio":-0.057,"min_rate":5.1750000000000004e-05,"max_rate":5.2170000000000004e-05,"is_open":false,"open_timestamp":1516591500000.0,"close_timestamp":1516604700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":18.95303438,"open_date":"2018-01-23 13:10:00+00:00","close_date":"2018-01-23 16:00:00+00:00","open_rate":0.0026381,"close_rate":0.002656631560461616,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":170,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.0024877283,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.0024877283,"stop_loss_ratio":-0.057,"min_rate":0.0026100000000000003,"max_rate":0.00266,"is_open":false,"open_timestamp":1516713000000.0,"close_timestamp":1516723200000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":912.40875912,"open_date":"2018-01-26 06:30:00+00:00","close_date":"2018-01-26 
10:45:00+00:00","open_rate":5.480000000000001e-05,"close_rate":5.518494731560462e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":255,"profit_ratio":-0.0,"profit_abs":-0.0,"sell_reason":"roi","initial_stop_loss_abs":5.1676400000000006e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":5.1676400000000006e-05,"stop_loss_ratio":-0.057,"min_rate":5.3670000000000006e-05,"max_rate":5.523e-05,"is_open":false,"open_timestamp":1516948200000.0,"close_timestamp":1516963500000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":909.58704748,"open_date":"2018-01-27 02:10:00+00:00","close_date":"2018-01-27 05:40:00+00:00","open_rate":5.4970000000000004e-05,"close_rate":5.535614149523332e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":210,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":5.183671e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":5.183671e-05,"stop_loss_ratio":-0.057,"min_rate":5.472000000000001e-05,"max_rate":5.556e-05,"is_open":false,"open_timestamp":1517019000000.0,"close_timestamp":1517031600000.0}],"locks":[],"best_pair":{"key":"TRX/BTC","trades":1,"profit_mean":0.03986049,"profit_mean_pct":3.986049,"profit_sum":0.03986049,"profit_sum_pct":3.99,"profit_total_abs":0.002,"profit_total":2e-06,"profit_total_pct":0.0,"duration_avg":"0:35:00","wins":1,"draws":0,"losses":0},"worst_pair":{"key":"ADA/BTC","trades":4,"profit_mean":-0.015894495,"profit_mean_pct":-1.5894495000000002,"profit_sum":-0.06357798,"profit_sum_pct":-6.36,"profit_total_abs":-0.00319003,"profit_total":-3.19003e-06,"profit_total_pct":-0.0,"duration_avg":"3:46:00","wins":0,"draws":3,"losses":1},"results_per_pair":[{"key":"TRX/BTC","trades":1,"profit_mean":0.03986049,"profit_mean_pct":3.986049,"profit_sum":0.03986049,"profit_sum_pct":3.99,"profit_total_abs":0.002,"profit_total":2e-06,"profit_total_pct":0.0,"duration_avg":"0:35:00","wins":1,"draws":0,"losses":0},{"key":"ETH/BTC","trades":2,"profit_mean":0.008337855,"profit_mean_pct":0.8337855,"profit_sum":0.01667571,"profit_sum_pct":1.67,"profit_total_abs":0.0008367,"profit_total":8.367e-07,"profit_total_pct":0.0,"duration_avg":"2:00:00","wins":1,"draws":1,"losses":0},{"key":"ETC/BTC","trades":3,"profit_mean":0.0031163500000000004,"profit_mean_pct":0.31163500000000005,"profit_sum":0.009349050000000001,"profit_sum_pct":0.93,"profit_total_abs":0.00046909,"profit_total":4.6909000000000003e-07,"profit_total_pct":0.0,"duration_avg":"2:17:00","wins":2,"draws":1,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":1,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"7:05:00","wins":0,"draws":1,"losses":0},{"key":"ADA/BTC","trades":4,"profit_mean":-0.015894495,"profit_mean_pct":-1.5894495000000002,"profit_sum":-0.06357798,"profit_sum_pct":-6.36,"profit_total_abs":-0.00319003,"profit_total":-3.19003e-06,"profit_total_pct":-0.0,"duration_avg":"3:46:00","wins":0,"draws":3,"losses":1},{"key":"TOTAL","trades":11,"profit_mean":0.00020975181818181756,"profit_mean_pct":0.020975181818181757,"profit_sum":0.002307269999999993,"profit_sum_pct":0.23,"profit_total_abs":0.00011576000000000034,"profit_total":1.1576000000000034e-07,"profit_total_pct":0.0,"duration_avg":"3:03:00","wins":4,"draws":6,"losses":1}],"sell_reason_summary":[{"sell
_reason":"roi","trades":10,"wins":4,"draws":6,"losses":0,"profit_mean":0.0065885250000000005,"profit_mean_pct":0.66,"profit_sum":0.06588525,"profit_sum_pct":6.59,"profit_total_abs":0.0033057900000000003,"profit_total":0.021961750000000002,"profit_total_pct":2.2},{"sell_reason":"stop_loss","trades":1,"wins":0,"draws":0,"losses":1,"profit_mean":-0.06357798,"profit_mean_pct":-6.36,"profit_sum":-0.06357798,"profit_sum_pct":-6.36,"profit_total_abs":-0.00319003,"profit_total":-0.021192660000000002,"profit_total_pct":-2.12}],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":11,"total_volume":0.55,"avg_stake_amount":0.05,"profit_mean":0.00020975181818181756,"profit_median":0.0,"profit_total":1.1576000000000034e-07,"profit_total_abs":0.00011576000000000034,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":0.58,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000.00011576,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.03986049,"backtest_worst_day":-0.06357798,"backtest_best_day_abs":0.002,"backtest_worst_day_abs":-0.00319003,"winning_days":4,"draw_days":13,"losing_days":1,"wins":4,"losses":1,"draws":6,"holding_avg":"3:03:00","winner_holding_avg":"1:39:00","loser_holding_avg":"3:40:00","max_drawdown":0.06357798,"max_drawdown_abs":0.00319003,"drawdown_start":"2018-01-10 21:15:00","drawdown_start_ts":1515618900000.0,"drawdown_end":"2018-01-13 15:10:00","drawdown_end_ts":1515856200000.0,"max_drawdown_low":-0.00235333,"max_drawdown_high":0.0008367,"csum_min":999.99764667,"csum_max":1000.0008367},"results_explanation":" 11 trades. 4/6/1 Wins/Draws/Losses. Avg profit 0.02%. Median profit 0.00%. Total profit 0.00011576 BTC ( 0.00\u03A3%). 
Avg duration 3:03:00 min.","total_profit":1.1576000000000034e-07,"current_epoch":5,"is_initial_point":true,"is_best":false} +{"loss":100000,"params_dict":{"mfi-value":"20","fastd-value":"21","adx-value":"26","rsi-value":"23","mfi-enabled":true,"fastd-enabled":false,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal","sell-mfi-value":"97","sell-fastd-value":"85","sell-adx-value":"55","sell-rsi-value":"76","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper","roi_t1":"34","roi_t2":"28","roi_t3":"32","roi_p1":0.031,"roi_p2":0.033,"roi_p3":0.146,"stoploss":-0.05},"params_details":{"buy":{"mfi-value":"20","fastd-value":"21","adx-value":"26","rsi-value":"23","mfi-enabled":true,"fastd-enabled":false,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal"},"sell":{"sell-mfi-value":"97","sell-fastd-value":"85","sell-adx-value":"55","sell-rsi-value":"76","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper"},"roi":"{0: 0.21, 32: 0.064, 60: 0.031, 94: 0}","stoploss":{"stoploss":-0.05}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[],"locks":[],"best_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"worst_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"results_per_pair":[{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ETC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"TRX/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ADA/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"sell_reason_summary":[],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":0,"
trade_count_long":0,"trade_count_short":0,"total_volume":0.0,"avg_stake_amount":0,"profit_mean":0,"profit_median":0,"profit_total":0.0,"profit_total_abs":0,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793107,"trades_per_day":0.0,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0,"backtest_worst_day":0,"backtest_best_day_abs":0,"backtest_worst_day_abs":0,"winning_days":0,"draw_days":0,"losing_days":0,"wins":0,"losses":0,"draws":0,"holding_avg":"0:00:00","winner_holding_avg":"0:00:00","loser_holding_avg":"0:00:00","max_drawdown":0.0,"max_drawdown_abs":0.0,"max_drawdown_low":0.0,"max_drawdown_high":0.0,"drawdown_start":"1970-01-01 00:00:00+00:00","drawdown_start_ts":0,"drawdown_end":"1970-01-01 00:00:00+00:00","drawdown_end_ts":0,"csum_min":0,"csum_max":0},"results_explanation":" 0 trades. 0/0/0 Wins/Draws/Losses. Avg profit 0.00%. Median profit 0.00%. Total profit 0.00000000 BTC ( 0.00\u03A3%). Avg duration 0:00:00 min.","total_profit":0.0,"current_epoch":1,"is_initial_point":true,"is_best":false} +{"loss":100000,"params_dict":{"mfi-value":"14","fastd-value":"43","adx-value":"30","rsi-value":"24","mfi-enabled":true,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal","sell-mfi-value":"97","sell-fastd-value":"71","sell-adx-value":"82","sell-rsi-value":"99","sell-mfi-enabled":false,"sell-fastd-enabled":false,"sell-adx-enabled":false,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper","roi_t1":"84","roi_t2":"35","roi_t3":"19","roi_p1":0.024,"roi_p2":0.022,"roi_p3":0.061,"stoploss":-0.083},"params_details":{"buy":{"mfi-value":"14","fastd-value":"43","adx-value":"30","rsi-value":"24","mfi-enabled":true,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":true,"trigger":"sar_reversal"},"sell":{"sell-mfi-value":"97","sell-fastd-value":"71","sell-adx-value":"82","sell-rsi-value":"99","sell-mfi-enabled":false,"sell-fastd-enabled":false,"sell-adx-enabled":false,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper"},"roi":"{0: 0.107, 19: 0.046, 54: 0.024, 138: 
0}","stoploss":{"stoploss":-0.083}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[],"locks":[],"best_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"worst_pair":{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},"results_per_pair":[{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ETC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"TRX/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ADA/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"sell_reason_summary":[],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0,"profit_sum_pct":0.0,"profit_total_abs":0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":0,"trade_count_long":0,"trade_count_short":0,"total_volume":0.0,"avg_stake_amount":0,"profit_mean":0,"profit_median":0,"profit_total":0.0,"profit_total_abs":0,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 
04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":0.0,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0,"backtest_worst_day":0,"backtest_best_day_abs":0,"backtest_worst_day_abs":0,"winning_days":0,"draw_days":0,"losing_days":0,"wins":0,"losses":0,"draws":0,"holding_avg":"0:00:00","winner_holding_avg":"0:00:00","loser_holding_avg":"0:00:00","max_drawdown":0.0,"max_drawdown_abs":0.0,"max_drawdown_low":0.0,"max_drawdown_high":0.0,"drawdown_start":"1970-01-01 00:00:00+00:00","drawdown_start_ts":0,"drawdown_end":"1970-01-01 00:00:00+00:00","drawdown_end_ts":0,"csum_min":0,"csum_max":0},"results_explanation":" 0 trades. 0/0/0 Wins/Draws/Losses. Avg profit 0.00%. Median profit 0.00%. Total profit 0.00000000 BTC ( 0.00\u03A3%). Avg duration 0:00:00 min.","total_profit":0.0,"current_epoch":2,"is_initial_point":true,"is_best":false} +{"loss":2.183447401951895,"params_dict":{"mfi-value":"14","fastd-value":"15","adx-value":"40","rsi-value":"36","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":false,"trigger":"sar_reversal","sell-mfi-value":"92","sell-fastd-value":"84","sell-adx-value":"61","sell-rsi-value":"61","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper","roi_t1":"68","roi_t2":"41","roi_t3":"21","roi_p1":0.015,"roi_p2":0.064,"roi_p3":0.126,"stoploss":-0.024},"params_details":{"buy":{"mfi-value":"14","fastd-value":"15","adx-value":"40","rsi-value":"36","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":false,"rsi-enabled":false,"trigger":"sar_reversal"},"sell":{"sell-mfi-value":"92","sell-fastd-value":"84","sell-adx-value":"61","sell-rsi-value":"61","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":true,"sell-rsi-enabled":true,"sell-trigger":"sell-bb_upper"},"roi":"{0: 0.20500000000000002, 21: 0.079, 62: 0.015, 130: 0}","stoploss":{"stoploss":-0.024}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.94115571,"open_date":"2018-01-11 11:40:00+00:00","close_date":"2018-01-11 19:40:00+00:00","open_rate":0.01700012,"close_rate":0.017119538805820372,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":480,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.01659211712,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.01659211712,"stop_loss_ratio":-0.024,"min_rate":0.01689809,"max_rate":0.0171462,"is_open":false,"open_timestamp":1515670800000.0,"close_timestamp":1515699600000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.57407318,"open_date":"2018-01-12 11:05:00+00:00","close_date":"2018-01-12 
12:30:00+00:00","open_rate":0.08709691,"close_rate":0.08901977203712995,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":85,"profit_ratio":0.01494768,"profit_abs":0.00075,"sell_reason":"roi","initial_stop_loss_abs":0.08500658416,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.08500658416,"stop_loss_ratio":-0.024,"min_rate":0.08702974000000001,"max_rate":0.08929248000000001,"is_open":false,"open_timestamp":1515755100000.0,"close_timestamp":1515760200000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.93166236,"open_date":"2018-01-12 03:30:00+00:00","close_date":"2018-01-12 13:05:00+00:00","open_rate":0.01705517,"close_rate":0.01717497550928249,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":575,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.016645845920000003,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016645845920000003,"stop_loss_ratio":-0.024,"min_rate":0.0169841,"max_rate":0.01719135,"is_open":false,"open_timestamp":1515727800000.0,"close_timestamp":1515762300000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.96876855,"open_date":"2018-01-13 03:50:00+00:00","close_date":"2018-01-13 06:05:00+00:00","open_rate":0.016842,"close_rate":0.016960308078273957,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":135,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.016437792,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016437792,"stop_loss_ratio":-0.024,"min_rate":0.016836999999999998,"max_rate":0.017,"is_open":false,"open_timestamp":1515815400000.0,"close_timestamp":1515823500000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.53163205,"open_date":"2018-01-13 13:25:00+00:00","close_date":"2018-01-13 15:35:00+00:00","open_rate":0.09405001,"close_rate":0.09471067238835926,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":130,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.09179280976,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.09179280976,"stop_loss_ratio":-0.024,"min_rate":0.09369894000000001,"max_rate":0.09479997,"is_open":false,"open_timestamp":1515849900000.0,"close_timestamp":1515857700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.23816853,"open_date":"2018-01-13 15:30:00+00:00","close_date":"2018-01-13 16:20:00+00:00","open_rate":0.0025989999999999997,"close_rate":0.0028232990466633217,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":50,"profit_ratio":0.07872446,"profit_abs":0.00395,"sell_reason":"roi","initial_stop_loss_abs":0.002536624,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.002536624,"stop_loss_ratio":-0.024,"min_rate":0.00259525,"max_rate":0.0028288700000000003,"is_open":false,"open_timestamp":1515857400000.0,"close_timestamp":1515860400000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":492.80504632,"open_date":"2018-01-14 21:35:00+00:00","close_date":"2018-01-14 23:15:00+00:00","open_rate":0.00010146000000000001,"close_rate":0.00010369995985950828,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":100,"profit_ratio":0.01494768,"profit_abs":0.00075,"sell_reason":"roi","initial_stop_loss_abs":9.902496e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":9.902496e-05,"stop_loss_ratio":-0.024,"min_rate":0.0001012,"max_rate":0.00010414,"is_open":false,"open_timestamp":1515965700000.0,"close_timestamp":1515971700000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.92398174,"open_date":"2018-01-15 12:45:00+00:00","close_date":"2018-01-15 
21:05:00+00:00","open_rate":0.01709997,"close_rate":0.01722009021073758,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":500,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.016689570719999998,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016689570719999998,"stop_loss_ratio":-0.024,"min_rate":0.01694,"max_rate":0.01725,"is_open":false,"open_timestamp":1516020300000.0,"close_timestamp":1516050300000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":1111.60515785,"open_date":"2018-01-15 19:50:00+00:00","close_date":"2018-01-15 23:45:00+00:00","open_rate":4.4980000000000006e-05,"close_rate":4.390048e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":235,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":4.390048e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":4.390048e-05,"stop_loss_ratio":-0.024,"min_rate":4.409e-05,"max_rate":4.502e-05,"is_open":false,"open_timestamp":1516045800000.0,"close_timestamp":1516059900000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":519.80455349,"open_date":"2018-01-21 03:55:00+00:00","close_date":"2018-01-21 04:05:00+00:00","open_rate":9.619e-05,"close_rate":9.388144e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":10,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":9.388144e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":9.388144e-05,"stop_loss_ratio":-0.024,"min_rate":9.568e-05,"max_rate":9.673e-05,"is_open":false,"open_timestamp":1516506900000.0,"close_timestamp":1516507500000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":3.029754,"open_date":"2018-01-20 22:15:00+00:00","close_date":"2018-01-21 07:45:00+00:00","open_rate":0.01650299,"close_rate":0.016106918239999997,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":570,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":0.016106918239999997,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016106918239999997,"stop_loss_ratio":-0.024,"min_rate":0.0162468,"max_rate":0.01663179,"is_open":false,"open_timestamp":1516486500000.0,"close_timestamp":1516520700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":18.75461832,"open_date":"2018-01-21 13:00:00+00:00","close_date":"2018-01-21 16:25:00+00:00","open_rate":0.00266601,"close_rate":0.00260202576,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":205,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":0.00260202576,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.00260202576,"stop_loss_ratio":-0.024,"min_rate":0.0026290800000000002,"max_rate":0.00269384,"is_open":false,"open_timestamp":1516539600000.0,"close_timestamp":1516551900000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":552.18111541,"open_date":"2018-01-22 02:10:00+00:00","close_date":"2018-01-22 04:20:00+00:00","open_rate":9.055e-05,"close_rate":9.118607626693427e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":130,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":8.83768e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":8.83768e-05,"stop_loss_ratio":-0.024,"min_rate":9.013e-05,"max_rate":9.197e-05,"is_open":false,"open_timestamp":1516587000000.0,"close_timestamp":1516594800000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":2.99733237,"open_date":"2018-01-22 03:20:00+00:00","close_date":"2018-01-22 
13:50:00+00:00","open_rate":0.0166815,"close_rate":0.016281143999999997,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":630,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":0.016281143999999997,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.016281143999999997,"stop_loss_ratio":-0.024,"min_rate":0.01641443,"max_rate":0.016800000000000002,"is_open":false,"open_timestamp":1516591200000.0,"close_timestamp":1516629000000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":503.52467271,"open_date":"2018-01-23 08:55:00+00:00","close_date":"2018-01-23 09:40:00+00:00","open_rate":9.93e-05,"close_rate":9.69168e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":45,"profit_ratio":-0.03080817,"profit_abs":-0.0015458,"sell_reason":"stop_loss","initial_stop_loss_abs":9.69168e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":9.69168e-05,"stop_loss_ratio":-0.024,"min_rate":9.754e-05,"max_rate":0.00010025,"is_open":false,"open_timestamp":1516697700000.0,"close_timestamp":1516700400000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.55148073,"open_date":"2018-01-24 02:10:00+00:00","close_date":"2018-01-24 04:40:00+00:00","open_rate":0.090665,"close_rate":0.09130188409433015,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":150,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.08848903999999999,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.08848903999999999,"stop_loss_ratio":-0.024,"min_rate":0.090665,"max_rate":0.09146000000000001,"is_open":false,"open_timestamp":1516759800000.0,"close_timestamp":1516768800000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.10584639,"open_date":"2018-01-24 19:20:00+00:00","close_date":"2018-01-24 21:35:00+00:00","open_rate":0.002617,"close_rate":0.0026353833416959357,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":135,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.002554192,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.002554192,"stop_loss_ratio":-0.024,"min_rate":0.002617,"max_rate":0.00264999,"is_open":false,"open_timestamp":1516821600000.0,"close_timestamp":1516829700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.34602691,"open_date":"2018-01-25 14:35:00+00:00","close_date":"2018-01-25 16:35:00+00:00","open_rate":0.00258451,"close_rate":0.002641568926241846,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.01494768,"profit_abs":0.00075,"sell_reason":"roi","initial_stop_loss_abs":0.00252248176,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.00252248176,"stop_loss_ratio":-0.024,"min_rate":0.00258451,"max_rate":0.00264579,"is_open":false,"open_timestamp":1516890900000.0,"close_timestamp":1516898100000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":3.11910295,"open_date":"2018-01-24 23:05:00+00:00","close_date":"2018-01-25 16:55:00+00:00","open_rate":0.01603025,"close_rate":0.016142855870546913,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":1070,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.015645523999999997,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.015645523999999997,"stop_loss_ratio":-0.024,"min_rate":0.015798760000000002,"max_rate":0.01617,"is_open":false,"open_timestamp":1516835100000.0,"close_timestamp":1516899300000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":553.70985604,"open_date":"2018-01-26 19:30:00+00:00","close_date":"2018-01-26 
23:30:00+00:00","open_rate":9.03e-05,"close_rate":9.093432012042147e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":240,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":8.813279999999999e-05,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":8.813279999999999e-05,"stop_loss_ratio":-0.024,"min_rate":8.961e-05,"max_rate":9.1e-05,"is_open":false,"open_timestamp":1516995000000.0,"close_timestamp":1517009400000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":19.22929005,"open_date":"2018-01-26 21:15:00+00:00","close_date":"2018-01-28 03:50:00+00:00","open_rate":0.0026002,"close_rate":0.0026184653286502758,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":1835,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.0025377952,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.0025377952,"stop_loss_ratio":-0.024,"min_rate":0.00254702,"max_rate":0.00262797,"is_open":false,"open_timestamp":1517001300000.0,"close_timestamp":1517111400000.0},{"pair":"LTC/BTC","stake_amount":0.05,"amount":3.15677093,"open_date":"2018-01-27 22:05:00+00:00","close_date":"2018-01-28 10:45:00+00:00","open_rate":0.01583897,"close_rate":0.015950232207727046,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":760,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.01545883472,"initial_stop_loss_ratio":-0.024,"stop_loss_abs":0.01545883472,"stop_loss_ratio":-0.024,"min_rate":0.015700000000000002,"max_rate":0.01596521,"is_open":false,"open_timestamp":1517090700000.0,"close_timestamp":1517136300000.0}],"locks":[],"best_pair":{"key":"ETC/BTC","trades":5,"profit_mean":0.012572794000000002,"profit_mean_pct":1.2572794000000003,"profit_sum":0.06286397,"profit_sum_pct":6.29,"profit_total_abs":0.0031542000000000002,"profit_total":3.1542000000000002e-06,"profit_total_pct":0.0,"duration_avg":"7:49:00","wins":2,"draws":2,"losses":1},"worst_pair":{"key":"LTC/BTC","trades":8,"profit_mean":-0.0077020425,"profit_mean_pct":-0.77020425,"profit_sum":-0.06161634,"profit_sum_pct":-6.16,"profit_total_abs":-0.0030916,"profit_total":-3.0915999999999998e-06,"profit_total_pct":-0.0,"duration_avg":"9:50:00","wins":0,"draws":6,"losses":2},"results_per_pair":[{"key":"ETC/BTC","trades":5,"profit_mean":0.012572794000000002,"profit_mean_pct":1.2572794000000003,"profit_sum":0.06286397,"profit_sum_pct":6.29,"profit_total_abs":0.0031542000000000002,"profit_total":3.1542000000000002e-06,"profit_total_pct":0.0,"duration_avg":"7:49:00","wins":2,"draws":2,"losses":1},{"key":"ETH/BTC","trades":3,"profit_mean":0.00498256,"profit_mean_pct":0.498256,"profit_sum":0.01494768,"profit_sum_pct":1.49,"profit_total_abs":0.00075,"profit_total":7.5e-07,"profit_total_pct":0.0,"duration_avg":"2:02:00","wins":1,"draws":2,"losses":0},{"key":"ADA/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":1,"profit_mean":-0.03080817,"profit_mean_pct":-3.080817,"profit_sum":-0.03080817,"profit_sum_pct":-3.08,"profit_total_abs":-0.0015458,"profit_total":-1.5457999999999999e-06,"profit_total_pct":-0.0,"duration_avg":"3:55:00","wins":0,"draws":0,"losses":1},{"key":"TRX/BTC","trades":5,"profit_mean":-0.009333732,"profit_mean_pct":-0.9333732000000001,"profit_sum":-0.04666866,"profit_sum_pct":-4.67,"profit_total_abs":-0.0023416,"profit_total":-2.3416e-06,"profit_total_pct":-0.0,"duration_avg":"1:45:00","wins":1,"dr
aws":2,"losses":2},{"key":"LTC/BTC","trades":8,"profit_mean":-0.0077020425,"profit_mean_pct":-0.77020425,"profit_sum":-0.06161634,"profit_sum_pct":-6.16,"profit_total_abs":-0.0030916,"profit_total":-3.0915999999999998e-06,"profit_total_pct":-0.0,"duration_avg":"9:50:00","wins":0,"draws":6,"losses":2},{"key":"TOTAL","trades":22,"profit_mean":-0.0027855236363636365,"profit_mean_pct":-0.27855236363636365,"profit_sum":-0.06128152,"profit_sum_pct":-6.13,"profit_total_abs":-0.0030748,"profit_total":-3.0747999999999998e-06,"profit_total_pct":-0.0,"duration_avg":"6:12:00","wins":4,"draws":12,"losses":6}],"sell_reason_summary":[{"sell_reason":"roi","trades":16,"wins":4,"draws":12,"losses":0,"profit_mean":0.00772296875,"profit_mean_pct":0.77,"profit_sum":0.1235675,"profit_sum_pct":12.36,"profit_total_abs":0.006200000000000001,"profit_total":0.041189166666666666,"profit_total_pct":4.12},{"sell_reason":"stop_loss","trades":6,"wins":0,"draws":0,"losses":6,"profit_mean":-0.03080817,"profit_mean_pct":-3.08,"profit_sum":-0.18484902,"profit_sum_pct":-18.48,"profit_total_abs":-0.0092748,"profit_total":-0.06161634,"profit_total_pct":-6.16}],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":22,"trade_count_long":22,"trade_count_short":0,"total_volume":1.1000000000000003,"avg_stake_amount":0.05000000000000002,"profit_mean":-0.0027855236363636365,"profit_median":0.0,"profit_total":-3.0747999999999998e-06,"profit_total_abs":-0.0030748,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":1.16,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":999.9969252,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.07872446,"backtest_worst_day":-0.09242451,"backtest_best_day_abs":0.00395,"backtest_worst_day_abs":-0.0046374,"winning_days":4,"draw_days":10,"losing_days":4,"wins":4,"losses":6,"draws":12,"holding_avg":"6:12:00","winner_holding_avg":"1:29:00","loser_holding_avg":"4:42:00","max_drawdown":0.18484901999999998,"max_drawdown_abs":0.0092748,"drawdown_start":"2018-01-14 23:15:00","drawdown_start_ts":1515971700000.0,"drawdown_end":"2018-01-23 09:40:00","drawdown_end_ts":1516700400000.0,"max_drawdown_low":-0.0038247999999999997,"max_drawdown_high":0.00545,"csum_min":999.9961752,"csum_max":1000.00545},"results_explanation":" 22 trades. 4/12/6 Wins/Draws/Losses. Avg profit -0.28%. Median profit 0.00%. Total profit -0.00307480 BTC ( -0.00\u03A3%). 
Avg duration 6:12:00 min.","total_profit":-3.0747999999999998e-06,"current_epoch":3,"is_initial_point":true,"is_best":true} +{"loss":-4.9544427978437175,"params_dict":{"mfi-value":"23","fastd-value":"40","adx-value":"50","rsi-value":"27","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":true,"rsi-enabled":true,"trigger":"bb_lower","sell-mfi-value":"87","sell-fastd-value":"60","sell-adx-value":"81","sell-rsi-value":"69","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":false,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal","roi_t1":"105","roi_t2":"43","roi_t3":"12","roi_p1":0.03,"roi_p2":0.036,"roi_p3":0.103,"stoploss":-0.081},"params_details":{"buy":{"mfi-value":"23","fastd-value":"40","adx-value":"50","rsi-value":"27","mfi-enabled":false,"fastd-enabled":true,"adx-enabled":true,"rsi-enabled":true,"trigger":"bb_lower"},"sell":{"sell-mfi-value":"87","sell-fastd-value":"60","sell-adx-value":"81","sell-rsi-value":"69","sell-mfi-enabled":true,"sell-fastd-enabled":true,"sell-adx-enabled":false,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal"},"roi":"{0: 0.16899999999999998, 12: 0.066, 55: 0.03, 160: 0}","stoploss":{"stoploss":-0.081}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[{"pair":"XLM/BTC","stake_amount":0.05,"amount":1086.95652174,"open_date":"2018-01-13 13:30:00+00:00","close_date":"2018-01-13 16:30:00+00:00","open_rate":4.6e-05,"close_rate":4.632313095835424e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":180,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":4.2274e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.2274e-05,"stop_loss_ratio":-0.081,"min_rate":4.4980000000000006e-05,"max_rate":4.673e-05,"is_open":false,"open_timestamp":1515850200000.0,"close_timestamp":1515861000000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":851.35365231,"open_date":"2018-01-15 14:50:00+00:00","close_date":"2018-01-15 16:15:00+00:00","open_rate":5.873000000000001e-05,"close_rate":6.0910642247867544e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":85,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":5.397287000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":5.397287000000001e-05,"stop_loss_ratio":-0.081,"min_rate":5.873000000000001e-05,"max_rate":6.120000000000001e-05,"is_open":false,"open_timestamp":1516027800000.0,"close_timestamp":1516032900000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":896.86098655,"open_date":"2018-01-16 00:35:00+00:00","close_date":"2018-01-16 03:15:00+00:00","open_rate":5.575000000000001e-05,"close_rate":5.6960000000000004e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":160,"profit_ratio":0.01457705,"profit_abs":0.0007314,"sell_reason":"roi","initial_stop_loss_abs":5.123425000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":5.123425000000001e-05,"stop_loss_ratio":-0.081,"min_rate":5.575000000000001e-05,"max_rate":5.730000000000001e-05,"is_open":false,"open_timestamp":1516062900000.0,"close_timestamp":1516072500000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":747.160789,"open_date":"2018-01-16 22:30:00+00:00","close_date":"2018-01-16 
22:45:00+00:00","open_rate":6.692e-05,"close_rate":7.182231811339689e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":15,"profit_ratio":0.06576981,"profit_abs":0.0033,"sell_reason":"roi","initial_stop_loss_abs":6.149948000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":6.149948000000001e-05,"stop_loss_ratio":-0.081,"min_rate":6.692e-05,"max_rate":7.566e-05,"is_open":false,"open_timestamp":1516141800000.0,"close_timestamp":1516142700000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":720.5649229,"open_date":"2018-01-17 15:15:00+00:00","close_date":"2018-01-17 16:40:00+00:00","open_rate":6.939000000000001e-05,"close_rate":7.19664475664827e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":85,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":6.376941000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":6.376941000000001e-05,"stop_loss_ratio":-0.081,"min_rate":6.758e-05,"max_rate":7.244e-05,"is_open":false,"open_timestamp":1516202100000.0,"close_timestamp":1516207200000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":1144.42664225,"open_date":"2018-01-18 22:20:00+00:00","close_date":"2018-01-19 00:35:00+00:00","open_rate":4.3690000000000004e-05,"close_rate":4.531220772704466e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":135,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":4.015111e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.015111e-05,"stop_loss_ratio":-0.081,"min_rate":4.3690000000000004e-05,"max_rate":4.779e-05,"is_open":false,"open_timestamp":1516314000000.0,"close_timestamp":1516322100000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":876.57784011,"open_date":"2018-01-18 22:25:00+00:00","close_date":"2018-01-19 01:05:00+00:00","open_rate":5.704e-05,"close_rate":5.792e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":160,"profit_ratio":0.00834457,"profit_abs":0.00041869,"sell_reason":"roi","initial_stop_loss_abs":5.2419760000000006e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":5.2419760000000006e-05,"stop_loss_ratio":-0.081,"min_rate":5.704e-05,"max_rate":5.8670000000000006e-05,"is_open":false,"open_timestamp":1516314300000.0,"close_timestamp":1516323900000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":525.59655209,"open_date":"2018-01-20 05:05:00+00:00","close_date":"2018-01-20 06:25:00+00:00","open_rate":9.513e-05,"close_rate":9.86621726041144e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":80,"profit_ratio":0.02989537,"profit_abs":0.0015,"sell_reason":"roi","initial_stop_loss_abs":8.742447000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":8.742447000000001e-05,"stop_loss_ratio":-0.081,"min_rate":9.513e-05,"max_rate":9.95e-05,"is_open":false,"open_timestamp":1516424700000.0,"close_timestamp":1516429500000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":920.64076597,"open_date":"2018-01-26 07:40:00+00:00","close_date":"2018-01-26 10:20:00+00:00","open_rate":5.431000000000001e-05,"close_rate":5.474000000000001e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":160,"profit_ratio":0.0008867,"profit_abs":4.449e-05,"sell_reason":"roi","initial_stop_loss_abs":4.991089000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.991089000000001e-05,"stop_loss_ratio":-0.081,"min_rate":5.3670000000000006e-05,"max_rate":5.5e-05,"is_open":false,"open_timestamp":1516952400000.0,"close_timestamp":1516962000000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":944.28706327,"open_date":"2018-01-28 
04:35:00+00:00","close_date":"2018-01-30 04:45:00+00:00","open_rate":5.2950000000000006e-05,"close_rate":4.995000000000001e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":2890,"profit_ratio":-0.06323759,"profit_abs":-0.00317295,"sell_reason":"force_sell","initial_stop_loss_abs":4.866105000000001e-05,"initial_stop_loss_ratio":-0.081,"stop_loss_abs":4.866105000000001e-05,"stop_loss_ratio":-0.081,"min_rate":4.980000000000001e-05,"max_rate":5.3280000000000005e-05,"is_open":true,"open_timestamp":1517114100000.0,"close_timestamp":1517287500000.0}],"locks":[],"best_pair":{"key":"TRX/BTC","trades":3,"profit_mean":0.04185351666666667,"profit_mean_pct":4.185351666666667,"profit_sum":0.12556055,"profit_sum_pct":12.56,"profit_total_abs":0.0063,"profit_total":6.3e-06,"profit_total_pct":0.0,"duration_avg":"1:00:00","wins":3,"draws":0,"losses":0},"worst_pair":{"key":"XLM/BTC","trades":3,"profit_mean":-0.01111407333333333,"profit_mean_pct":-1.111407333333333,"profit_sum":-0.03334221999999999,"profit_sum_pct":-3.33,"profit_total_abs":-0.0016729499999999999,"profit_total":-1.6729499999999998e-06,"profit_total_pct":-0.0,"duration_avg":"17:48:00","wins":1,"draws":1,"losses":1},"results_per_pair":[{"key":"TRX/BTC","trades":3,"profit_mean":0.04185351666666667,"profit_mean_pct":4.185351666666667,"profit_sum":0.12556055,"profit_sum_pct":12.56,"profit_total_abs":0.0063,"profit_total":6.3e-06,"profit_total_pct":0.0,"duration_avg":"1:00:00","wins":3,"draws":0,"losses":0},{"key":"ADA/BTC","trades":4,"profit_mean":0.0134259225,"profit_mean_pct":1.34259225,"profit_sum":0.05370369,"profit_sum_pct":5.37,"profit_total_abs":0.00269458,"profit_total":2.69458e-06,"profit_total_pct":0.0,"duration_avg":"2:21:00","wins":4,"draws":0,"losses":0},{"key":"ETH/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"ETC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":3,"profit_mean":-0.01111407333333333,"profit_mean_pct":-1.111407333333333,"profit_sum":-0.03334221999999999,"profit_sum_pct":-3.33,"profit_total_abs":-0.0016729499999999999,"profit_total":-1.6729499999999998e-06,"profit_total_pct":-0.0,"duration_avg":"17:48:00","wins":1,"draws":1,"losses":1},{"key":"TOTAL","trades":10,"profit_mean":0.014592201999999999,"profit_mean_pct":1.4592201999999999,"profit_sum":0.14592201999999999,"profit_sum_pct":14.59,"profit_total_abs":0.00732163,"profit_total":7.32163e-06,"profit_total_pct":0.0,"duration_avg":"6:35:00","wins":8,"draws":1,"losses":1}],"sell_reason_summary":[{"sell_reason":"roi","trades":9,"wins":8,"draws":1,"losses":0,"profit_mean":0.023239956666666665,"profit_mean_pct":2.32,"profit_sum":0.20915961,"profit_sum_pct":20.92,"profit_total_abs":0.01049458,"profit_total":0.06971987,"profit_total_pct":6.97},{"sell_reason":"force_sell","trades":1,"wins":0,"draws":0,"losses":1,"profit_mean":-0.06323759,"profit_mean_pct":-6.32,"profit_sum":-0.06323759,"profit_sum_pct":-6.32,"profit_total_abs":-0.00317295,"profit_total":-0.021079196666666664,"profit_total_pct":-2
.11}],"left_open_trades":[{"key":"XLM/BTC","trades":1,"profit_mean":-0.06323759,"profit_mean_pct":-6.323759,"profit_sum":-0.06323759,"profit_sum_pct":-6.32,"profit_total_abs":-0.00317295,"profit_total":-3.17295e-06,"profit_total_pct":-0.0,"duration_avg":"2 days, 0:10:00","wins":0,"draws":0,"losses":1},{"key":"TOTAL","trades":1,"profit_mean":-0.06323759,"profit_mean_pct":-6.323759,"profit_sum":-0.06323759,"profit_sum_pct":-6.32,"profit_total_abs":-0.00317295,"profit_total":-3.17295e-06,"profit_total_pct":-0.0,"duration_avg":"2 days, 0:10:00","wins":0,"draws":0,"losses":1}],"total_trades":10,"trade_count_long":10,"trade_count_short":0,"total_volume":0.5,"avg_stake_amount":0.05,"profit_mean":0.014592201999999999,"profit_median":0.02223621,"profit_total":7.32163e-06,"profit_total_abs":0.00732163,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":0.53,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000.00732163,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.08034685999999999,"backtest_worst_day":-0.06323759,"backtest_best_day_abs":0.0040314,"backtest_worst_day_abs":-0.00317295,"winning_days":6,"draw_days":11,"losing_days":1,"wins":8,"losses":1,"draws":1,"holding_avg":"6:35:00","winner_holding_avg":"1:50:00","loser_holding_avg":"2 days, 0:10:00","max_drawdown":0.06323759000000001,"max_drawdown_abs":0.00317295,"drawdown_start":"2018-01-26 10:20:00","drawdown_start_ts":1516962000000.0,"drawdown_end":"2018-01-30 04:45:00","drawdown_end_ts":1517287500000.0,"max_drawdown_low":0.007321629999999998,"max_drawdown_high":0.010494579999999998,"csum_min":1000.0,"csum_max":1000.01049458},"results_explanation":" 10 trades. 8/1/1 Wins/Draws/Losses. Avg profit 1.46%. Median profit 2.22%. Total profit 0.00732163 BTC ( 0.00\u03A3%). 
Avg duration 6:35:00 min.","total_profit":7.32163e-06,"current_epoch":4,"is_initial_point":true,"is_best":true} +{"loss":0.16709185414267655,"params_dict":{"mfi-value":"10","fastd-value":"45","adx-value":"28","rsi-value":"37","mfi-enabled":false,"fastd-enabled":false,"adx-enabled":true,"rsi-enabled":true,"trigger":"macd_cross_signal","sell-mfi-value":"85","sell-fastd-value":"56","sell-adx-value":"98","sell-rsi-value":"89","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal","roi_t1":"85","roi_t2":"11","roi_t3":"24","roi_p1":0.04,"roi_p2":0.043,"roi_p3":0.053,"stoploss":-0.057},"params_details":{"buy":{"mfi-value":"10","fastd-value":"45","adx-value":"28","rsi-value":"37","mfi-enabled":false,"fastd-enabled":false,"adx-enabled":true,"rsi-enabled":true,"trigger":"macd_cross_signal"},"sell":{"sell-mfi-value":"85","sell-fastd-value":"56","sell-adx-value":"98","sell-rsi-value":"89","sell-mfi-enabled":true,"sell-fastd-enabled":false,"sell-adx-enabled":true,"sell-rsi-enabled":false,"sell-trigger":"sell-sar_reversal"},"roi":"{0: 0.13599999999999998, 24: 0.08299999999999999, 35: 0.04, 120: 0}","stoploss":{"stoploss":-0.057}},"params_not_optimized":{"buy":{},"sell":{}},"results_metrics":{"trades":[{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.56173464,"open_date":"2018-01-10 19:15:00+00:00","close_date":"2018-01-10 21:15:00+00:00","open_rate":0.08901,"close_rate":0.09112999000000001,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.01667571,"profit_abs":0.0008367,"sell_reason":"roi","initial_stop_loss_abs":0.08393643,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.08393643,"stop_loss_ratio":-0.057,"min_rate":0.08894498,"max_rate":0.09116998,"is_open":false,"open_timestamp":1515611700000.0,"close_timestamp":1515618900000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":794.65988557,"open_date":"2018-01-13 11:30:00+00:00","close_date":"2018-01-13 15:10:00+00:00","open_rate":6.292e-05,"close_rate":5.9333559999999994e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":220,"profit_ratio":-0.06357798,"profit_abs":-0.00319003,"sell_reason":"stop_loss","initial_stop_loss_abs":5.9333559999999994e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":5.9333559999999994e-05,"stop_loss_ratio":-0.057,"min_rate":5.9900000000000006e-05,"max_rate":6.353e-05,"is_open":false,"open_timestamp":1515843000000.0,"close_timestamp":1515856200000.0},{"pair":"XLM/BTC","stake_amount":0.05,"amount":1086.95652174,"open_date":"2018-01-13 14:35:00+00:00","close_date":"2018-01-13 21:40:00+00:00","open_rate":4.6e-05,"close_rate":4.632313095835424e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":425,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":4.3378e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":4.3378e-05,"stop_loss_ratio":-0.057,"min_rate":4.4980000000000006e-05,"max_rate":4.6540000000000005e-05,"is_open":false,"open_timestamp":1515854100000.0,"close_timestamp":1515879600000.0},{"pair":"ETH/BTC","stake_amount":0.05,"amount":0.53757603,"open_date":"2018-01-15 13:15:00+00:00","close_date":"2018-01-15 
15:15:00+00:00","open_rate":0.0930101,"close_rate":0.09366345745107878,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.0877085243,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.0877085243,"stop_loss_ratio":-0.057,"min_rate":0.09188489999999999,"max_rate":0.09380000000000001,"is_open":false,"open_timestamp":1516022100000.0,"close_timestamp":1516029300000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":17.07469496,"open_date":"2018-01-15 14:35:00+00:00","close_date":"2018-01-15 16:35:00+00:00","open_rate":0.00292831,"close_rate":0.00297503,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.00886772,"profit_abs":0.00044494,"sell_reason":"roi","initial_stop_loss_abs":0.0027613963299999997,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.0027613963299999997,"stop_loss_ratio":-0.057,"min_rate":0.00292831,"max_rate":0.00301259,"is_open":false,"open_timestamp":1516026900000.0,"close_timestamp":1516034100000.0},{"pair":"TRX/BTC","stake_amount":0.05,"amount":702.44450688,"open_date":"2018-01-17 04:25:00+00:00","close_date":"2018-01-17 05:00:00+00:00","open_rate":7.118e-05,"close_rate":7.453721023582538e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":35,"profit_ratio":0.03986049,"profit_abs":0.002,"sell_reason":"roi","initial_stop_loss_abs":6.712274e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":6.712274e-05,"stop_loss_ratio":-0.057,"min_rate":7.118e-05,"max_rate":7.658000000000002e-05,"is_open":false,"open_timestamp":1516163100000.0,"close_timestamp":1516165200000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":18.86756854,"open_date":"2018-01-20 06:05:00+00:00","close_date":"2018-01-20 08:05:00+00:00","open_rate":0.00265005,"close_rate":0.00266995,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":120,"profit_ratio":0.00048133,"profit_abs":2.415e-05,"sell_reason":"roi","initial_stop_loss_abs":0.00249899715,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.00249899715,"stop_loss_ratio":-0.057,"min_rate":0.00265005,"max_rate":0.00271,"is_open":false,"open_timestamp":1516428300000.0,"close_timestamp":1516435500000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":966.18357488,"open_date":"2018-01-22 03:25:00+00:00","close_date":"2018-01-22 07:05:00+00:00","open_rate":5.1750000000000004e-05,"close_rate":5.211352232814853e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":220,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":4.8800250000000004e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":4.8800250000000004e-05,"stop_loss_ratio":-0.057,"min_rate":5.1750000000000004e-05,"max_rate":5.2170000000000004e-05,"is_open":false,"open_timestamp":1516591500000.0,"close_timestamp":1516604700000.0},{"pair":"ETC/BTC","stake_amount":0.05,"amount":18.95303438,"open_date":"2018-01-23 13:10:00+00:00","close_date":"2018-01-23 16:00:00+00:00","open_rate":0.0026381,"close_rate":0.002656631560461616,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":170,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":0.0024877283,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":0.0024877283,"stop_loss_ratio":-0.057,"min_rate":0.0026100000000000003,"max_rate":0.00266,"is_open":false,"open_timestamp":1516713000000.0,"close_timestamp":1516723200000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":912.40875912,"open_date":"2018-01-26 06:30:00+00:00","close_date":"2018-01-26 
10:45:00+00:00","open_rate":5.480000000000001e-05,"close_rate":5.518494731560462e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":255,"profit_ratio":-0.0,"profit_abs":-0.0,"sell_reason":"roi","initial_stop_loss_abs":5.1676400000000006e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":5.1676400000000006e-05,"stop_loss_ratio":-0.057,"min_rate":5.3670000000000006e-05,"max_rate":5.523e-05,"is_open":false,"open_timestamp":1516948200000.0,"close_timestamp":1516963500000.0},{"pair":"ADA/BTC","stake_amount":0.05,"amount":909.58704748,"open_date":"2018-01-27 02:10:00+00:00","close_date":"2018-01-27 05:40:00+00:00","open_rate":5.4970000000000004e-05,"close_rate":5.535614149523332e-05,"fee_open":0.0035,"fee_close":0.0035,"trade_duration":210,"profit_ratio":0.0,"profit_abs":0.0,"sell_reason":"roi","initial_stop_loss_abs":5.183671e-05,"initial_stop_loss_ratio":-0.057,"stop_loss_abs":5.183671e-05,"stop_loss_ratio":-0.057,"min_rate":5.472000000000001e-05,"max_rate":5.556e-05,"is_open":false,"open_timestamp":1517019000000.0,"close_timestamp":1517031600000.0}],"locks":[],"best_pair":{"key":"TRX/BTC","trades":1,"profit_mean":0.03986049,"profit_mean_pct":3.986049,"profit_sum":0.03986049,"profit_sum_pct":3.99,"profit_total_abs":0.002,"profit_total":2e-06,"profit_total_pct":0.0,"duration_avg":"0:35:00","wins":1,"draws":0,"losses":0},"worst_pair":{"key":"ADA/BTC","trades":4,"profit_mean":-0.015894495,"profit_mean_pct":-1.5894495000000002,"profit_sum":-0.06357798,"profit_sum_pct":-6.36,"profit_total_abs":-0.00319003,"profit_total":-3.19003e-06,"profit_total_pct":-0.0,"duration_avg":"3:46:00","wins":0,"draws":3,"losses":1},"results_per_pair":[{"key":"TRX/BTC","trades":1,"profit_mean":0.03986049,"profit_mean_pct":3.986049,"profit_sum":0.03986049,"profit_sum_pct":3.99,"profit_total_abs":0.002,"profit_total":2e-06,"profit_total_pct":0.0,"duration_avg":"0:35:00","wins":1,"draws":0,"losses":0},{"key":"ETH/BTC","trades":2,"profit_mean":0.008337855,"profit_mean_pct":0.8337855,"profit_sum":0.01667571,"profit_sum_pct":1.67,"profit_total_abs":0.0008367,"profit_total":8.367e-07,"profit_total_pct":0.0,"duration_avg":"2:00:00","wins":1,"draws":1,"losses":0},{"key":"ETC/BTC","trades":3,"profit_mean":0.0031163500000000004,"profit_mean_pct":0.31163500000000005,"profit_sum":0.009349050000000001,"profit_sum_pct":0.93,"profit_total_abs":0.00046909,"profit_total":4.6909000000000003e-07,"profit_total_pct":0.0,"duration_avg":"2:17:00","wins":2,"draws":1,"losses":0},{"key":"LTC/BTC","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0},{"key":"XLM/BTC","trades":1,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"7:05:00","wins":0,"draws":1,"losses":0},{"key":"ADA/BTC","trades":4,"profit_mean":-0.015894495,"profit_mean_pct":-1.5894495000000002,"profit_sum":-0.06357798,"profit_sum_pct":-6.36,"profit_total_abs":-0.00319003,"profit_total":-3.19003e-06,"profit_total_pct":-0.0,"duration_avg":"3:46:00","wins":0,"draws":3,"losses":1},{"key":"TOTAL","trades":11,"profit_mean":0.00020975181818181756,"profit_mean_pct":0.020975181818181757,"profit_sum":0.002307269999999993,"profit_sum_pct":0.23,"profit_total_abs":0.00011576000000000034,"profit_total":1.1576000000000034e-07,"profit_total_pct":0.0,"duration_avg":"3:03:00","wins":4,"draws":6,"losses":1}],"sell_reason_summary":[{"sell
_reason":"roi","trades":10,"wins":4,"draws":6,"losses":0,"profit_mean":0.0065885250000000005,"profit_mean_pct":0.66,"profit_sum":0.06588525,"profit_sum_pct":6.59,"profit_total_abs":0.0033057900000000003,"profit_total":0.021961750000000002,"profit_total_pct":2.2},{"sell_reason":"stop_loss","trades":1,"wins":0,"draws":0,"losses":1,"profit_mean":-0.06357798,"profit_mean_pct":-6.36,"profit_sum":-0.06357798,"profit_sum_pct":-6.36,"profit_total_abs":-0.00319003,"profit_total":-0.021192660000000002,"profit_total_pct":-2.12}],"left_open_trades":[{"key":"TOTAL","trades":0,"profit_mean":0.0,"profit_mean_pct":0.0,"profit_sum":0.0,"profit_sum_pct":0.0,"profit_total_abs":0.0,"profit_total":0.0,"profit_total_pct":0.0,"duration_avg":"0:00","wins":0,"draws":0,"losses":0}],"total_trades":11,"trade_count_long":11,"trade_count_short":0,"total_volume":0.55,"avg_stake_amount":0.05,"profit_mean":0.00020975181818181756,"profit_median":0.0,"profit_total":1.1576000000000034e-07,"profit_total_abs":0.00011576000000000034,"backtest_start":"2018-01-10 07:25:00","backtest_start_ts":1515569100000,"backtest_end":"2018-01-30 04:45:00","backtest_end_ts":1517287500000,"backtest_days":19,"backtest_run_start_ts":1620793107,"backtest_run_end_ts":1620793108,"trades_per_day":0.58,"market_change":0,"pairlist":["ETH/BTC","LTC/BTC","ETC/BTC","XLM/BTC","TRX/BTC","ADA/BTC"],"stake_amount":0.05,"stake_currency":"BTC","stake_currency_decimals":8,"starting_balance":1000,"dry_run_wallet":1000,"final_balance":1000.00011576,"max_open_trades":3,"max_open_trades_setting":3,"timeframe":"5m","timerange":"","enable_protections":false,"strategy_name":"SampleStrategy","stoploss":-0.1,"trailing_stop":false,"trailing_stop_positive":null,"trailing_stop_positive_offset":0.0,"trailing_only_offset_is_reached":false,"use_custom_stoploss":false,"minimal_roi":{"60":0.01,"30":0.02,"0":0.04},"use_exit_signal":true,"exit_profit_only":false,"exit_profit_offset":0.0,"ignore_roi_if_entry_signal":false,"backtest_best_day":0.03986049,"backtest_worst_day":-0.06357798,"backtest_best_day_abs":0.002,"backtest_worst_day_abs":-0.00319003,"winning_days":4,"draw_days":13,"losing_days":1,"wins":4,"losses":1,"draws":6,"holding_avg":"3:03:00","winner_holding_avg":"1:39:00","loser_holding_avg":"3:40:00","max_drawdown":0.06357798,"max_drawdown_abs":0.00319003,"drawdown_start":"2018-01-10 21:15:00","drawdown_start_ts":1515618900000.0,"drawdown_end":"2018-01-13 15:10:00","drawdown_end_ts":1515856200000.0,"max_drawdown_low":-0.00235333,"max_drawdown_high":0.0008367,"csum_min":999.99764667,"csum_max":1000.0008367},"results_explanation":" 11 trades. 4/6/1 Wins/Draws/Losses. Avg profit 0.02%. Median profit 0.00%. Total profit 0.00011576 BTC ( 0.00\u03A3%). 
Avg duration 3:03:00 min.","total_profit":1.1576000000000034e-07,"current_epoch":5,"is_initial_point":true,"is_best":false} From afc00bc30a94abd64fee000535e66287fd91595f Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 12:48:57 -0700 Subject: [PATCH 183/232] log warning if channel too far behind, add docstrings to message stream --- freqtrade/rpc/api_server/api_ws.py | 11 +++++++++- freqtrade/rpc/api_server/ws/channel.py | 12 ++++++----- freqtrade/rpc/api_server/ws/message_stream.py | 21 ++++++++++++------- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 77950923d..a80250c1b 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -1,4 +1,5 @@ import logging +import time from typing import Any, Dict from fastapi import APIRouter, Depends @@ -33,8 +34,16 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message """ Iterate over messages in the message stream and send them """ - async for message in message_stream: + async for message, ts in message_stream: if channel.subscribed_to(message.get('type')): + # Log a warning if this channel is behind + # on the message stream by a lot + if (time.time() - ts) > 60: + logger.warning("Channel {channel} is behind MessageStream by 1 minute," + " this can cause a memory leak if you see this message" + " often, consider reducing pair list size or amount of" + " consumers.") + await channel.send(message, timeout=True) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 7343bc306..a5f3b6216 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -59,6 +59,10 @@ class WebSocketChannel: def remote_addr(self): return self._websocket.remote_addr + @property + def avg_send_time(self): + return sum(self._send_times) / len(self._send_times) + def _calc_send_limit(self): """ Calculate the send high limit for this channel @@ -66,11 +70,9 @@ class WebSocketChannel: # Only update if we have enough data if len(self._send_times) == self._send_times.maxlen: - # At least 1s or twice the average of send times - self._send_high_limit = max( - (sum(self._send_times) / len(self._send_times)) * 2, - 1 - ) + # At least 1s or twice the average of send times, with a + # maximum of 3 seconds per message + self._send_high_limit = min(max(self.avg_send_time * 2, 1), 3) async def send( self, diff --git a/freqtrade/rpc/api_server/ws/message_stream.py b/freqtrade/rpc/api_server/ws/message_stream.py index 9592908ab..a55a0da3c 100644 --- a/freqtrade/rpc/api_server/ws/message_stream.py +++ b/freqtrade/rpc/api_server/ws/message_stream.py @@ -1,4 +1,5 @@ import asyncio +import time class MessageStream: @@ -11,14 +12,20 @@ class MessageStream: self._waiter = self._loop.create_future() def publish(self, message): - waiter, self._waiter = self._waiter, self._loop.create_future() - waiter.set_result((message, self._waiter)) + """ + Publish a message to this MessageStream - async def subscribe(self): + :param message: The message to publish + """ + waiter, self._waiter = self._waiter, self._loop.create_future() + waiter.set_result((message, time.time(), self._waiter)) + + async def __aiter__(self): + """ + Iterate over the messages in the message stream + """ waiter = self._waiter while True: # Shield the future from being cancelled by a task waiting on it - message, waiter = await asyncio.shield(waiter) - yield message - - 
__aiter__ = subscribe + message, ts, waiter = await asyncio.shield(waiter) + yield message, ts From f268187e9b357127151ae45704538aed6c89f7f5 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 12:56:33 -0700 Subject: [PATCH 184/232] offload initial df computation to thread --- freqtrade/misc.py | 43 ++++++++++++++++++++++++++++++ freqtrade/rpc/api_server/api_ws.py | 3 ++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/freqtrade/misc.py b/freqtrade/misc.py index 2d2c7513a..349735dcd 100644 --- a/freqtrade/misc.py +++ b/freqtrade/misc.py @@ -1,9 +1,11 @@ """ Various tool function for Freqtrade and scripts """ +import asyncio import gzip import logging import re +import threading from datetime import datetime from pathlib import Path from typing import Any, Dict, Iterator, List, Mapping, Union @@ -301,3 +303,44 @@ def remove_entry_exit_signals(dataframe: pd.DataFrame): dataframe[SignalTagType.EXIT_TAG.value] = None return dataframe + + +def sync_to_async_iter(iter): + """ + Wrap blocking iterator into an asynchronous by + offloading computation to thread and using + pubsub pattern for yielding results + + :param iter: A synchronous iterator + :returns: An asynchronous iterator + """ + + loop = asyncio.get_event_loop() + q = asyncio.Queue(1) + exception = None + _END = object() + + async def yield_queue_items(): + while True: + next_item = await q.get() + if next_item is _END: + break + yield next_item + if exception is not None: + # The iterator has raised, propagate the exception + raise exception + + def iter_to_queue(): + nonlocal exception + try: + for item in iter: + # This runs outside the event loop thread, so we + # must use thread-safe API to talk to the queue. + asyncio.run_coroutine_threadsafe(q.put(item), loop).result() + except Exception as e: + exception = e + finally: + asyncio.run_coroutine_threadsafe(q.put(_END), loop).result() + + threading.Thread(target=iter_to_queue).start() + return yield_queue_items() diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index a80250c1b..6ecc1ef2a 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -7,6 +7,7 @@ from fastapi.websockets import WebSocket from pydantic import ValidationError from freqtrade.enums import RPCMessageType, RPCRequestType +from freqtrade.misc import sync_to_async_iter from freqtrade.rpc.api_server.api_auth import validate_ws_token from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel @@ -93,7 +94,7 @@ async def _process_consumer_request( limit = min(data.get('limit', 1500), 1500) if data else None # For every pair in the generator, send a separate message - for message in rpc._ws_request_analyzed_df(limit): + async for message in sync_to_async_iter(rpc._ws_request_analyzed_df(limit)): # Format response response = WSAnalyzedDFMessage(data=message) await channel.send(response.dict(exclude_none=True)) From 4aa4c6f49d27aa724ec8a120003c20215aa90195 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 13:08:41 -0700 Subject: [PATCH 185/232] change sleep in channel send to 0 --- freqtrade/rpc/api_server/ws/channel.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index a5f3b6216..76e48d889 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -104,14 +104,9 
@@ class WebSocketChannel: logger.info(f"Connection for {self} timed out, disconnecting") raise - # Without this sleep, messages would send to one channel - # first then another after the first one finished and prevent - # any normal Rest API calls from processing at the same time. - # With the sleep call, it gives control to the event - # loop to schedule other channel send methods, and helps - # throttle how fast we send. - # 0.01 = 100 messages/second max throughput - await asyncio.sleep(0.01) + # Explicitly give control back to event loop as + # websockets.send does not + await asyncio.sleep(0) async def recv(self): """ From bd95392eea3c4cdae7c5f97557a359599664ba34 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Fri, 25 Nov 2022 13:10:22 -0700 Subject: [PATCH 186/232] fix formatted string in warning message :) --- freqtrade/rpc/api_server/api_ws.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 6ecc1ef2a..9e7bb17a4 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -40,7 +40,7 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message # Log a warning if this channel is behind # on the message stream by a lot if (time.time() - ts) > 60: - logger.warning("Channel {channel} is behind MessageStream by 1 minute," + logger.warning(f"Channel {channel} is behind MessageStream by 1 minute," " this can cause a memory leak if you see this message" " often, consider reducing pair list size or amount of" " consumers.") From 9f13d99b999047237055d0650812e58fef127ab5 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 11:32:39 +0100 Subject: [PATCH 187/232] improve parameter table, add better documentation for custom calculate_reward, add various helpful notes in docstrings etc --- docs/freqai-parameter-table.md | 37 ++++++--- docs/freqai-reinforcement-learning.md | 111 ++++++++++++++++---------- 2 files changed, 97 insertions(+), 51 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 9e16aec8f..0a71f3ec9 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -6,7 +6,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | Parameter | Description | |------------|-------------| -| | **General configuration parameters** +| | **General configuration parameters within the `config.freqai` tree** | `freqai` | **Required.**
The parent dictionary containing all the parameters for controlling FreqAI.
**Datatype:** Dictionary. | `train_period_days` | **Required.**
Number of days to use for the training data (width of the sliding window).
**Datatype:** Positive integer. | `backtest_period_days` | **Required.**
Number of days over which to run inference from the trained model before sliding the `train_period_days` window defined above, and retraining the model during backtesting (more info [here](freqai-running.md#backtesting)). This can be a fractional number of days, but beware that the provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br>
**Datatype:** Float. @@ -20,7 +20,11 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`.
| `write_metrics_to_disk` | Collect train timings, inference timings and CPU usage in a JSON file. <br>
**Datatype:** Boolean.
Default: `False`.
| `data_kitchen_thread_count` | <br>
Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If the user does not set it (default), FreqAI will use the maximum number of threads minus 2 (leaving 1 physical core available for the Freqtrade bot and FreqUI). <br>
**Datatype:** Positive integer. -| | **Feature parameters** + + +| Parameter | Description | +|------------|-------------| +| | **Feature parameters within the `freqai.feature_parameters` sub dictionary** | `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md).
**Datatype:** Dictionary. | `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset.
**Datatype:** List of timeframes (strings). | `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` during feature engineering (see details [here](freqai-feature-engineering.md)) will be created for each correlated coin. The correlated coins features are added to the base indicators dataset.
**Datatype:** List of assets (strings). @@ -39,16 +43,28 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation).
**Datatype:** Integer.
Default: `0`. | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset.
**Datatype:** Float.
Default: `30`. | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it.
**Datatype:** Boolean.
Default: `False` (no reversal). -| | **Data split parameters** + + +| Parameter | Description | +|------------|-------------| +| | **Data split parameters within the `freqai.data_split_parameters` sub dictionary** | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
**Datatype:** Dictionary. | `test_size` | The fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1.
| `shuffle` | Shuffle the training data points during training. Typically, this is set to `False` to preserve the chronological order of the data in time-series forecasting. <br>
**Datatype:** Boolean.
Default: `False`.
-| | **Model training parameters**
+
+
+| Parameter | Description |
+|------------|-------------|
+| | **Model training parameters within the `freqai.model_training_parameters` sub dictionary**
| `model_training_parameters` | A flexible dictionary that includes all parameters available by the selected model library. For example, if you use `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If you select a different model, this dictionary can contain any parameter from that model. A list of the currently available models can be found [here](freqai-configuration.md#using-different-prediction-models). <br>
**Datatype:** Dictionary. | `n_estimators` | The number of boosted trees to fit in the training of the model.
**Datatype:** Integer. | `learning_rate` | Boosting learning rate during training of the model.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. -| | **Reinforcement Learning Parameters** + + +| Parameter | Description | +|------------|-------------| +| | **Reinforcement Learning Parameters within the `freqai.rl_config` sub dictionary** | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary.
| `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points. <br>
**Datatype:** Integer. | `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. @@ -56,10 +72,13 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentaiton. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string.
| `policy_type` | One of the available policy types from stable_baselines3. <br>
**Datatype:** float.
Default: 0.8 -| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not).
**Datatype:** int. +| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). Recommended to leave this untouched, by default, this value is set to the total number of physical cores minus 1.
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. + +| Parameter | Description | +|------------|-------------| | | **Extraneous parameters** -| `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. -| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: `2`. -| `reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI).
**Datatype:** Boolean.
Default: `False`. +| `freqai.keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards.
**Datatype:** Boolean.
Default: `False`. +| `freqai.conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction.
**Datatype:** Integer.
Default: `2`. +| `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI).
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 0e4388cf1..48118bb2a 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -154,55 +154,82 @@ In order to configure the `Reinforcement Learner` the following dictionary must Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link). +!!! Note + If you would like to experiment with `continual_learning`, then you should set that value to `true` in the main `freqai` configuration dictionary. This will tell the Reinforcement Learning library to continue training new models from the final state of previous models, instead of retraining new models from scratch each time a retrain is initiated. + !!! Note Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html). -## Creating the reward +## Creating a custom reward function -As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: +As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but users are encouraged to create their own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: ```python - class MyRLEnv(Base5ActionRLEnv): - """ - User made custom environment. This class inherits from BaseEnvironment and gym.env. 
- Users can override any functions from those parent classes. Here is an example - of a user customized `calculate_reward()` function. - """ - def calculate_reward(self, action): - # first, penalize if the action is not valid - if not self._is_valid(action): - return -2 - pnl = self.get_unrealized_profit() + import from freqtrade.freqai.prediction_models ReinforcementLearner import ReinforcementLearner - factor = 100 - # reward agent for entering trades - if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ - and self._position == Positions.Neutral: - return 25 - # discourage agent from not entering trades - if action == Actions.Neutral.value and self._position == Positions.Neutral: - return -1 - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick - if trade_duration <= max_trade_duration: - factor *= 1.5 - elif trade_duration > max_trade_duration: - factor *= 0.5 - # discourage sitting in position - if self._position in (Positions.Short, Positions.Long) and \ - action == Actions.Neutral.value: - return -1 * trade_duration / max_trade_duration - # close long - if action == Actions.Long_exit.value and self._position == Positions.Long: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) - # close short - if action == Actions.Short_exit.value and self._position == Positions.Short: - if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) - return float(pnl * factor) - return 0. + class MyCoolRLModel(ReinforcementLearner): + """ + User created RL prediction model. + + Save this file to `freqtrade/user_data/freqaimodels` + + then use it with: + + freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat + + Here the users can override any of the functions + available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this + is where the user overrides `MyRLEnv` (see below), to define custom + `calculate_reward()` function, or to override any other parts of the environment. + + This class also allows users to override any other part of the IFreqaiModel tree. + For example, the user can override `def fit()` or `def train()` or `def predict()` + to take fine-tuned control over these processes. + + Another common override may be `def data_cleaning_predict()` where the user can + take fine-tuned control over the data handling pipeline. + """ + class MyRLEnv(Base5ActionRLEnv): + """ + User made custom environment. This class inherits from BaseEnvironment and gym.env. + Users can override any functions from those parent classes. Here is an example + of a user customized `calculate_reward()` function. 
+ """ + def calculate_reward(self, action): + # first, penalize if the action is not valid + if not self._is_valid(action): + return -2 + pnl = self.get_unrealized_profit() + + factor = 100 + # reward agent for entering trades + if action in (Actions.Long_enter.value, Actions.Short_enter.value) \ + and self._position == Positions.Neutral: + return 25 + # discourage agent from not entering trades + if action == Actions.Neutral.value and self._position == Positions.Neutral: + return -1 + max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + trade_duration = self._current_tick - self._last_trade_tick + if trade_duration <= max_trade_duration: + factor *= 1.5 + elif trade_duration > max_trade_duration: + factor *= 0.5 + # discourage sitting in position + if self._position in (Positions.Short, Positions.Long) and \ + action == Actions.Neutral.value: + return -1 * trade_duration / max_trade_duration + # close long + if action == Actions.Long_exit.value and self._position == Positions.Long: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) + # close short + if action == Actions.Short_exit.value and self._position == Positions.Short: + if pnl > self.profit_aim * self.rr: + factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + return float(pnl * factor) + return 0. ``` ### Using Tensorboard From 8dbfd2cacfcd3dcabf2e4e5b3eddf84269e850f9 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 11:51:08 +0100 Subject: [PATCH 188/232] improve docstring clarity about how to inherit from ReinforcementLearner, demonstrate inherittance with ReinforcementLearner_multiproc --- .../prediction_models/ReinforcementLearner.py | 27 ++++++++++- .../ReinforcementLearner_multiproc.py | 45 ++----------------- 2 files changed, 30 insertions(+), 42 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 063af5ff5..dcf7cf54b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -14,7 +14,32 @@ logger = logging.getLogger(__name__) class ReinforcementLearner(BaseReinforcementLearningModel): """ - User created Reinforcement Learning Model prediction model. + Reinforcement Learning Model prediction model. + + Users can inherit from this class to make their own RL model with custom + environment/training controls. Define the file as follows: + + ``` + from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner + + class MyCoolRLModel(ReinforcementLearner): + ``` + + Save the file to `user_data/freqaimodels`, then run it with: + + freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat + + Here the users can override any of the functions + available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this + is where the user overrides `MyRLEnv` (see below), to define custom + `calculate_reward()` function, or to override any other parts of the environment. + + This class also allows users to override any other part of the IFreqaiModel tree. + For example, the user can override `def fit()` or `def train()` or `def predict()` + to take fine-tuned control over these processes. + + Another common override may be `def data_cleaning_predict()` where the user can + take fine-tuned control over the data handling pipeline. 
""" def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index baba16066..56636c1f6 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -1,61 +1,24 @@ import logging -from pathlib import Path from typing import Any, Dict # , Tuple # import numpy.typing as npt -import torch as th from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel, - make_env) +from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner +from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env logger = logging.getLogger(__name__) -class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): +class ReinforcementLearner_multiproc(ReinforcementLearner): """ - User created Reinforcement Learning Model prediction model. + Demonstration of how to build vectorized environments """ - def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): - - train_df = data_dictionary["train_features"] - total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - - # model arch - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=self.net_arch) - - if dk.pair not in self.dd.model_dictionary or not self.continual_learning: - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=Path( - dk.full_path / "tensorboard" / dk.pair.split('/')[0]), - **self.freqai_info['model_training_parameters'] - ) - else: - logger.info('Continual learning activated - starting training from previously ' - 'trained agent.') - model = self.dd.model_dictionary[dk.pair] - model.set_env(self.train_env) - - model.learn( - total_timesteps=int(total_timesteps), - callback=self.eval_callback - ) - - if Path(dk.data_path / "best_model.zip").is_file(): - logger.info('Callback found a best model.') - best_model = self.MODELCLASS.load(dk.data_path / "best_model") - return best_model - - logger.info('Couldnt find best model, using final model instead.') - - return model - def set_train_and_eval_environments(self, data_dictionary: Dict[str, Any], prices_train: DataFrame, prices_test: DataFrame, dk: FreqaiDataKitchen): From 81fd2e588ff8f97225f45071c59a46d42c88a269 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 12:11:59 +0100 Subject: [PATCH 189/232] ensure typing, remove unsued code --- docs/freqai-reinforcement-learning.md | 2 +- freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 +- freqtrade/freqai/RL/BaseEnvironment.py | 74 ++++++++++--------- .../prediction_models/ReinforcementLearner.py | 6 +- .../ReinforcementLearner_test_4ac.py | 6 +- 5 files changed, 46 insertions(+), 44 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 48118bb2a..2a1ffc250 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -195,7 +195,7 @@ As you begin to modify the strategy and the prediction model, you will quickly r Users can override any functions from those parent classes. 
Here is an example of a user customized `calculate_reward()` function. """ - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: # first, penalize if the action is not valid if not self._is_valid(action): return -2 diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 0d7672b2f..8012ff1af 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -158,7 +158,7 @@ class Base5ActionRLEnv(BaseEnvironment): (action == Actions.Long_exit.value and self._position == Positions.Short) or (action == Actions.Long_exit.value and self._position == Positions.Neutral)) - def _is_valid(self, action: int): + def _is_valid(self, action: int) -> bool: # trade signal """ Determine if the signal is valid. diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 6853377cb..7aa571697 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -208,13 +208,13 @@ class BaseEnvironment(gym.Env): """ return - def _is_valid(self, action: int): + def _is_valid(self, action: int) -> bool: """ Determine if the signal is valid.This is unique to the actions in the environment, and therefore must be inherited. """ - return + return True def add_entry_fee(self, price): return price * (1 + self.fee) @@ -230,7 +230,7 @@ class BaseEnvironment(gym.Env): self.history[key].append(value) @abstractmethod - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. @@ -263,38 +263,40 @@ class BaseEnvironment(gym.Env): # assumes unit stake and no compounding self._total_profit += pnl - def most_recent_return(self, action: int): - """ - Calculate the tick to tick return if in a trade. - Return is generated from rising prices in Long - and falling prices in Short positions. - The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. - """ - # Long positions - if self._position == Positions.Long: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - - if (self._position_history[self._current_tick - 1] == Positions.Short - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_entry_fee(previous_price) - - return np.log(current_price) - np.log(previous_price) - - # Short positions - if self._position == Positions.Short: - current_price = self.prices.iloc[self._current_tick].open - previous_price = self.prices.iloc[self._current_tick - 1].open - if (self._position_history[self._current_tick - 1] == Positions.Long - or self._position_history[self._current_tick - 1] == Positions.Neutral): - previous_price = self.add_exit_fee(previous_price) - - return np.log(previous_price) - np.log(current_price) - - return 0 - - def update_portfolio_log_returns(self, action): - self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) - def current_price(self) -> float: return self.prices.iloc[self._current_tick].open + + # Keeping around incase we want to start building more complex environment + # templates in the future. + # def most_recent_return(self): + # """ + # Calculate the tick to tick return if in a trade. + # Return is generated from rising prices in Long + # and falling prices in Short positions. 
+ # The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee. + # """ + # # Long positions + # if self._position == Positions.Long: + # current_price = self.prices.iloc[self._current_tick].open + # previous_price = self.prices.iloc[self._current_tick - 1].open + + # if (self._position_history[self._current_tick - 1] == Positions.Short + # or self._position_history[self._current_tick - 1] == Positions.Neutral): + # previous_price = self.add_entry_fee(previous_price) + + # return np.log(current_price) - np.log(previous_price) + + # # Short positions + # if self._position == Positions.Short: + # current_price = self.prices.iloc[self._current_tick].open + # previous_price = self.prices.iloc[self._current_tick - 1].open + # if (self._position_history[self._current_tick - 1] == Positions.Long + # or self._position_history[self._current_tick - 1] == Positions.Neutral): + # previous_price = self.add_exit_fee(previous_price) + + # return np.log(previous_price) - np.log(current_price) + + # return 0 + + # def update_portfolio_log_returns(self, action): + # self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index dcf7cf54b..61b01e21b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -89,7 +89,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): sets a custom reward based on profit and trade duration. """ - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. @@ -103,7 +103,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return -2 pnl = self.get_unrealized_profit() - factor = 100 + factor = 100. # reward agent for entering trades if (action in (Actions.Long_enter.value, Actions.Short_enter.value) @@ -114,7 +114,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: factor *= 1.5 diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 9861acfd8..29e3e3b64 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -20,7 +20,7 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): sets a custom reward based on profit and trade duration. """ - def calculate_reward(self, action): + def calculate_reward(self, action: int) -> float: # first, penalize if the action is not valid if not self._is_valid(action): @@ -28,7 +28,7 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) - factor = 100 + factor = 100. 
# reward agent for entering trades if (action in (Actions.Long_enter.value, Actions.Short_enter.value) @@ -39,7 +39,7 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) - trade_duration = self._current_tick - self._last_trade_tick + trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: factor *= 1.5 From bdfedb5fcb02b88c600ef25c88bbb5d939b8bd0a Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:03:07 +0100 Subject: [PATCH 190/232] Improve typehints / reduce warnings from mypy --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 4 ++-- freqtrade/freqai/RL/Base5ActionRLEnv.py | 2 +- freqtrade/freqai/RL/BaseEnvironment.py | 4 ++-- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 0c719ea92..1a235801c 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -103,7 +103,7 @@ class Base4ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info - def is_tradesignal(self, action: int): + def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short @@ -117,7 +117,7 @@ class Base4ActionRLEnv(BaseEnvironment): (action == Actions.Long_enter.value and self._position == Positions.Long) or (action == Actions.Long_enter.value and self._position == Positions.Short)) - def _is_valid(self, action: int): + def _is_valid(self, action: int) -> bool: """ Determine if the signal is valid. e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 8012ff1af..61abb8031 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -141,7 +141,7 @@ class Base5ActionRLEnv(BaseEnvironment): else: return self._current_tick - self._last_trade_tick - def is_tradesignal(self, action: int): + def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 7aa571697..3332e5a18 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -200,13 +200,13 @@ class BaseEnvironment(gym.Env): return 0. @abstractmethod - def is_tradesignal(self, action: int): + def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal. This is unique to the actions in the environment, and therefore must be inherited. 
""" - return + return True def _is_valid(self, action: int) -> bool: """ diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index bddac23b3..af9874d90 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -38,7 +38,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): User created Reinforcement Learning Model prediction class """ - def __init__(self, **kwargs): + def __init__(self, **kwargs) -> None: super().__init__(config=kwargs['config']) self.max_threads = min(self.freqai_info['rl_config'].get( 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) From cf2f12b47277ad8289cb92a67e8a198cf0cb59e4 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:06:21 +0100 Subject: [PATCH 191/232] Headers between Tables -> Tables can be jumped to directly --- docs/freqai-parameter-table.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 0a71f3ec9..084c9118c 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -4,6 +4,8 @@ The table below will list all configuration parameters available for FreqAI. Som Mandatory parameters are marked as **Required** and have to be set in one of the suggested ways. +### General configuration parameters + | Parameter | Description | |------------|-------------| | | **General configuration parameters within the `config.freqai` tree** @@ -21,6 +23,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file.
**Datatype:** Boolean.
Default: `False` | `data_kitchen_thread_count` |
Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If the user does not set it (default), FreqAI will use the maximum number of threads - 2 (leaving 1 physical core available for the Freqtrade bot and FreqUI)
**Datatype:** Positive integer. +### Feature parameters | Parameter | Description | |------------|-------------| @@ -44,6 +47,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset.
**Datatype:** Float.
Default: `30`. | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it.
**Datatype:** Boolean.
Default: `False` (no reversal). +### Data split parameters | Parameter | Description | |------------|-------------| @@ -52,6 +56,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `test_size` | The fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. | `shuffle` | Shuffle the training data points during training. Typically, to not remove the chronological order of data in time-series forecasting, this is set to `False`.
**Datatype:** Boolean.
Defaut: `False`. +### Model training parameters | Parameter | Description | |------------|-------------| @@ -61,6 +66,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `learning_rate` | Boosting learning rate during training of the model.
**Datatype:** Float. | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names.
**Datatype:** Float. +### Reinforcement Learning parameters | Parameter | Description | |------------|-------------| @@ -76,6 +82,8 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. +### Additional parameters + | Parameter | Description | |------------|-------------| | | **Extraneous parameters** From 8660ac9aa0010950a1f9227fe6c6048b3524ba84 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:12:44 +0100 Subject: [PATCH 192/232] Fix import in docs --- docs/freqai-reinforcement-learning.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 2a1ffc250..6bcba96ff 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -165,7 +165,8 @@ Parameter details can be found [here](freqai-parameter-table.md), but in general As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but users are encouraged to create their own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated: ```python - import from freqtrade.freqai.prediction_models ReinforcementLearner import ReinforcementLearner + from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner + from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv class MyCoolRLModel(ReinforcementLearner): """ From 7ebc8ee169afc3f8668e682c09712560152eb5d3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:32:18 +0100 Subject: [PATCH 193/232] Fix missing Optional typehint --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index af9874d90..709ded048 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -3,7 +3,7 @@ import logging from abc import abstractmethod from datetime import datetime, timezone from pathlib import Path -from typing import Any, Callable, Dict, Tuple, Type, Union +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union import gym import numpy as np @@ -46,7 +46,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None self.eval_env: Union[SubprocVecEnv, gym.Env] = None - self.eval_callback: EvalCallback = None + self.eval_callback: Optional[EvalCallback] = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] self.continual_learning = self.freqai_info.get('continual_learning', False) From 7b0a76fb7010eac44d7000626d9f167201b87f1a Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 25 
Nov 2022 10:41:37 +0100 Subject: [PATCH 194/232] Improve typehint --- freqtrade/rpc/api_server/webserver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index e4eb3895d..92bded1c5 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -1,6 +1,6 @@ import logging from ipaddress import IPv4Address -from typing import Any, Dict +from typing import Any, Dict, Optional import orjson import uvicorn @@ -46,7 +46,7 @@ class ApiServer(RPCHandler): # Exchange - only available in webserver mode. _exchange = None # websocket message stuff - _message_stream = None + _message_stream: Optional[MessageStream] = None def __new__(cls, *args, **kwargs): """ From fcf13580f14aea8e889eaf1af82140eb17596d5c Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:33:54 +0100 Subject: [PATCH 195/232] Revert "offload initial df computation to thread" This reverts commit f268187e9b357127151ae45704538aed6c89f7f5. --- freqtrade/misc.py | 43 ------------------------------ freqtrade/rpc/api_server/api_ws.py | 3 +-- 2 files changed, 1 insertion(+), 45 deletions(-) diff --git a/freqtrade/misc.py b/freqtrade/misc.py index 349735dcd..2d2c7513a 100644 --- a/freqtrade/misc.py +++ b/freqtrade/misc.py @@ -1,11 +1,9 @@ """ Various tool function for Freqtrade and scripts """ -import asyncio import gzip import logging import re -import threading from datetime import datetime from pathlib import Path from typing import Any, Dict, Iterator, List, Mapping, Union @@ -303,44 +301,3 @@ def remove_entry_exit_signals(dataframe: pd.DataFrame): dataframe[SignalTagType.EXIT_TAG.value] = None return dataframe - - -def sync_to_async_iter(iter): - """ - Wrap blocking iterator into an asynchronous by - offloading computation to thread and using - pubsub pattern for yielding results - - :param iter: A synchronous iterator - :returns: An asynchronous iterator - """ - - loop = asyncio.get_event_loop() - q = asyncio.Queue(1) - exception = None - _END = object() - - async def yield_queue_items(): - while True: - next_item = await q.get() - if next_item is _END: - break - yield next_item - if exception is not None: - # The iterator has raised, propagate the exception - raise exception - - def iter_to_queue(): - nonlocal exception - try: - for item in iter: - # This runs outside the event loop thread, so we - # must use thread-safe API to talk to the queue. 
- asyncio.run_coroutine_threadsafe(q.put(item), loop).result() - except Exception as e: - exception = e - finally: - asyncio.run_coroutine_threadsafe(q.put(_END), loop).result() - - threading.Thread(target=iter_to_queue).start() - return yield_queue_items() diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 9e7bb17a4..e183cd7e7 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -7,7 +7,6 @@ from fastapi.websockets import WebSocket from pydantic import ValidationError from freqtrade.enums import RPCMessageType, RPCRequestType -from freqtrade.misc import sync_to_async_iter from freqtrade.rpc.api_server.api_auth import validate_ws_token from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel @@ -94,7 +93,7 @@ async def _process_consumer_request( limit = min(data.get('limit', 1500), 1500) if data else None # For every pair in the generator, send a separate message - async for message in sync_to_async_iter(rpc._ws_request_analyzed_df(limit)): + for message in rpc._ws_request_analyzed_df(limit): # Format response response = WSAnalyzedDFMessage(data=message) await channel.send(response.dict(exclude_none=True)) From aaaa5a5f64dd4b1dec7d81fa0f1e7e2ede11f963 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 13:44:03 +0100 Subject: [PATCH 196/232] add documentation for net_arch, other small changes --- docs/freqai-parameter-table.md | 1 + docs/freqai-reinforcement-learning.md | 2 +- freqtrade/constants.py | 1 + freqtrade/freqai/RL/Base5ActionRLEnv.py | 31 ------------------------- 4 files changed, 3 insertions(+), 32 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 084c9118c..02426ec13 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -81,6 +81,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). Recommended to leave this untouched, by default, this value is set to the total number of physical cores minus 1.
**Datatype:** int. | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int. | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting.
**Datatype:** bool.
Default: `False`. +| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[, dict(vf=[], pi=[])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each. ### Additional parameters diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 6bcba96ff..241ccc3e2 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -34,7 +34,7 @@ Setting up and running a Reinforcement Learning model is the same as running a R freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json ``` -where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner`. The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor: +where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner` (or a custom user defined one located in `user_data/freqaimodels`). The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor: ```python def populate_any_indicators( diff --git a/freqtrade/constants.py b/freqtrade/constants.py index ba43e1328..3d7dbb13e 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -590,6 +590,7 @@ CONF_SCHEMA = { "cpu_count": {"type": "integer", "default": 1}, "model_type": {"type": "string", "default": "PPO"}, "policy_type": {"type": "string", "default": "MlpPolicy"}, + "net_arch": {"type": "list", "default": [128, 128]}, "model_reward_parameters": { "type": "object", "properties": { diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 61abb8031..ee43ac868 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -26,31 +26,6 @@ class Base5ActionRLEnv(BaseEnvironment): def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) - def reset(self): - - self._done = False - - if self.starting_point is True: - self._position_history = (self._start_tick * [None]) + [self._position] - else: - self._position_history = (self.window_size * [None]) + [self._position] - - self._current_tick = self._start_tick - self._last_trade_tick = None - self._position = Positions.Neutral - - self.total_reward = 0. - self._total_profit = 1. 
# unit - self.history = {} - self.trade_history = [] - self.portfolio_log_returns = np.zeros(len(self.prices)) - - self._profits = [(self._start_tick, 1)] - self.close_trade_profit = [] - self._total_unrealized_profit = 1 - - return self._get_observation() - def step(self, action: int): """ Logic for a single step (incrementing one candle in time) @@ -135,12 +110,6 @@ class Base5ActionRLEnv(BaseEnvironment): return observation, step_reward, self._done, info - def get_trade_duration(self): - if self._last_trade_tick is None: - return 0 - else: - return self._current_tick - self._last_trade_tick - def is_tradesignal(self, action: int) -> bool: """ Determine if the signal is a trade signal From be890b52fdb5afaaf12c1e8b7c0be52f83522935 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 13:44:48 +0100 Subject: [PATCH 197/232] remove np import --- freqtrade/freqai/RL/Base5ActionRLEnv.py | 1 - 1 file changed, 1 deletion(-) diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index ee43ac868..68b2e011b 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -1,7 +1,6 @@ import logging from enum import Enum -import numpy as np from gym import spaces from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions From b52f05923aac4fed453f03e3eae133884909038f Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 13:47:47 +0100 Subject: [PATCH 198/232] fix list to array in constants.py --- freqtrade/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 3d7dbb13e..878c38929 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -590,7 +590,7 @@ CONF_SCHEMA = { "cpu_count": {"type": "integer", "default": 1}, "model_type": {"type": "string", "default": "PPO"}, "policy_type": {"type": "string", "default": "MlpPolicy"}, - "net_arch": {"type": "list", "default": [128, 128]}, + "net_arch": {"type": "array", "default": [128, 128]}, "model_reward_parameters": { "type": "object", "properties": { From ce213b55a225c991365cceefc00c3f03ba303f94 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 13:58:22 +0100 Subject: [PATCH 199/232] Bybit fix candle limit --- freqtrade/exchange/bybit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/exchange/bybit.py b/freqtrade/exchange/bybit.py index 641540c89..d14c7c192 100644 --- a/freqtrade/exchange/bybit.py +++ b/freqtrade/exchange/bybit.py @@ -20,7 +20,7 @@ class Bybit(Exchange): """ _ft_has: Dict = { - "ohlcv_candle_limit": 200, + "ohlcv_candle_limit": 1000, "ccxt_futures_name": "linear", "ohlcv_has_history": False, } From 9af62ad117fd564df1da25831feafd345a8d22ae Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 26 Nov 2022 14:09:05 +0100 Subject: [PATCH 200/232] Add note to dev docs about freqUI release --- docs/developer.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/developer.md b/docs/developer.md index f88754c50..b4961ac77 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -434,6 +434,11 @@ To keep the release-log short, best wrap the full git changelog into a collapsib ``` +### FreqUI release + +If FreqUI has been updated substantially, make sure to create a release before merging the release branch. +Make sure that freqUI CI on the release is finished and passed before merging the release. 
+ ### Create github release / tag Once the PR against stable is merged (best right after merging): From a26b3a9ca8031753f406df690abd638b09ca8d31 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Sat, 26 Nov 2022 09:40:22 -0700 Subject: [PATCH 201/232] change sleep call back to 0.01 --- freqtrade/rpc/api_server/ws/channel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 76e48d889..c50aff8be 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -106,7 +106,7 @@ class WebSocketChannel: # Explicitly give control back to event loop as # websockets.send does not - await asyncio.sleep(0) + await asyncio.sleep(0.01) async def recv(self): """ From dba30393fb6f2c165ed7c5f34bcdfd1b34a75d74 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 26 Nov 2022 18:04:47 +0100 Subject: [PATCH 202/232] ensure extra_returns_per_train are set properly on first hist_preds build --- freqtrade/freqai/freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 94d471d13..0affabdc6 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -629,7 +629,7 @@ class IFreqaiModel(ABC): hist_preds_df['DI_values'] = 0 for return_str in dk.data['extra_returns_per_train']: - hist_preds_df[return_str] = 0 + hist_preds_df[return_str] = dk.data['extra_returns_per_train'][return_str] hist_preds_df['close_price'] = strat_df['close'] hist_preds_df['date_pred'] = strat_df['date'] From 51d21b413da418444d54a906f492a6a0999fef7f Mon Sep 17 00:00:00 2001 From: stm <37817561+initrv@users.noreply.github.com> Date: Sat, 26 Nov 2022 23:35:20 +0300 Subject: [PATCH 203/232] Fix 4ac update_total_profit _update_total_profit() must be executed before "self._position = Positions.Neutral" because _update_total_profit() calls get_unrealized_profit(), which returns 0 if position is neutral and total_profit is not updated --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 1a235801c..df4e79bea 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -73,8 +73,8 @@ class Base4ActionRLEnv(BaseEnvironment): trade_type = "short" self._last_trade_tick = self._current_tick elif action == Actions.Exit.value: - self._position = Positions.Neutral self._update_total_profit() + self._position = Positions.Neutral trade_type = "neutral" self._last_trade_tick = None else: From 21d7406291de50d183b12c9b694233918b46e961 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 15:14:19 +0100 Subject: [PATCH 204/232] Temporary fix for kraken download closes #7790 will be removed once the patch is in ccxt. 
--- freqtrade/exchange/kraken.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/freqtrade/exchange/kraken.py b/freqtrade/exchange/kraken.py index f3a9486f2..5d8c1ad29 100644 --- a/freqtrade/exchange/kraken.py +++ b/freqtrade/exchange/kraken.py @@ -218,3 +218,19 @@ class Kraken(Exchange): fees = sum(df['open_fund'] * df['open_mark'] * amount * time_in_ratio) return fees if is_short else -fees + + def _trades_contracts_to_amount(self, trades: List) -> List: + """ + Fix "last" id issue for kraken data downloads + This whole override can probably be removed once the following + issue is closed in ccxt: https://github.com/ccxt/ccxt/issues/15827 + """ + super()._trades_contracts_to_amount(trades) + if ( + len(trades) > 0 + and isinstance(trades[-1].get('info'), list) + and len(trades[-1].get('info', [])) > 7 + ): + + trades[-1]['id'] = trades[-1].get('info', [])[-1] + return trades From e4a3efc7d4b0ce94a02407315e60e689a20af900 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 15:44:14 +0100 Subject: [PATCH 205/232] Don't use strategy.stoploss too often discovered in #7760 --- freqtrade/freqtradebot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index 34d18b3d8..f9cb28c28 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -1151,7 +1151,7 @@ class FreqtradeBot(LoggingMixin): stoploss = ( self.edge.stoploss(pair=trade.pair) if self.edge else - self.strategy.stoploss / trade.leverage + trade.stop_loss_pct / trade.leverage ) if trade.is_short: stop_price = trade.open_rate * (1 - stoploss) From cf000a4c0090a56c8983666d67f9abca9e463c74 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 16:08:54 +0100 Subject: [PATCH 206/232] Bump develop version to 2022.12-dev --- freqtrade/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py index ad80410ee..b44189cb0 100644 --- a/freqtrade/__init__.py +++ b/freqtrade/__init__.py @@ -1,5 +1,5 @@ """ Freqtrade bot """ -__version__ = '2022.11.dev' +__version__ = '2022.12.dev' if 'dev' in __version__: try: From fe00a651632e040860b70a80140c62487588199c Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 21:34:07 +0300 Subject: [PATCH 207/232] FIx custom reward link --- docs/freqai-reinforcement-learning.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 241ccc3e2..741a9bbb4 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,14 +1,14 @@ # Reinforcement Learning !!! Note "Installation size" - Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". Users who prefer docker should ensure they use the docker image appended with `_freqairl`. ## Background and terminology ### What is RL and why does FreqAI need it? -Reinforcement learning involves two important components, the *agent* and the training *environment*. 
During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. +Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-a-custom-reward-function)). The reward is used to train weights in a neural network. A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.* @@ -16,9 +16,9 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new ### The RL interface -With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward). +With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-a-custom-reward-function). -We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. +We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-a-custom-reward-function), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. 
But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. @@ -130,7 +130,7 @@ After users realize there are no labels to set, they will soon understand that t return df ``` -It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. +It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. ## Configuring the Reinforcement Learner @@ -170,21 +170,21 @@ As you begin to modify the strategy and the prediction model, you will quickly r class MyCoolRLModel(ReinforcementLearner): """ - User created RL prediction model. + User created RL prediction model. Save this file to `freqtrade/user_data/freqaimodels` then use it with: freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat - - Here the users can override any of the functions - available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this + + Here the users can override any of the functions + available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this is where the user overrides `MyRLEnv` (see below), to define custom `calculate_reward()` function, or to override any other parts of the environment. - + This class also allows users to override any other part of the IFreqaiModel tree. - For example, the user can override `def fit()` or `def train()` or `def predict()` + For example, the user can override `def fit()` or `def train()` or `def predict()` to take fine-tuned control over these processes. Another common override may be `def data_cleaning_predict()` where the user can @@ -253,7 +253,7 @@ FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5Action * the actions available in the `calculate_reward` * the actions consumed by the user strategy -Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. +Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. 
Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. !!! Note FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). From 5b5859238b45795852d06ef6bc7d82681b9dee6a Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 22:06:14 +0300 Subject: [PATCH 208/232] Fix typo --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 741a9bbb4..ae3f67ed1 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -24,7 +24,7 @@ The framework is built on stable_baselines3 (torch) and OpenAI gym for the base ### Important considerations -As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. +As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. ## Running Reinforcement Learning From a85602eb9c68f5bb36cbb2e2aae2c61ad2518bc1 Mon Sep 17 00:00:00 2001 From: Joe Schr <8218910+TheJoeSchr@users.noreply.github.com> Date: Tue, 22 Nov 2022 11:41:28 +0100 Subject: [PATCH 209/232] add "how to run tests" --- docs/developer.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/developer.md b/docs/developer.md index b4961ac77..94923b035 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -49,6 +49,13 @@ For more information about the [Remote container extension](https://code.visuals New code should be covered by basic unittests. Depending on the complexity of the feature, Reviewers may request more in-depth unittests. If necessary, the Freqtrade team can assist and give guidance with writing good tests (however please don't expect anyone to write the tests for you). +#### How to run tests + +Use `py.test` in root folder to run all available testcases and confirm your local environment is setup correctly + +!!! Note "develop branch" + This assumes that you have `stable` branch checked out. Other branches may be work in progress with tests not working yet. 
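As a quick illustration of the command above, the same run can also be kicked off programmatically. This is a hypothetical helper, assuming the plugins implied by the flags used in this project's CI (`pytest-random-order` for `--random-order`, `pytest-cov` for the coverage flags) are installed:

```python
# run_tests.py - hypothetical convenience wrapper, equivalent to running
# `pytest --random-order --cov=freqtrade --cov-config=.coveragerc` from the
# repository root.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["--random-order", "--cov=freqtrade", "--cov-config=.coveragerc"]))
```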
+ #### Checking log content in tests Freqtrade uses 2 main methods to check log content in tests, `log_has()` and `log_has_re()` (to check using regex, in case of dynamic log-messages). From 320535a227357366c3211175bfdae5d46c541680 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 20:06:10 +0100 Subject: [PATCH 210/232] improve tests doc wording --- docs/developer.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/developer.md b/docs/developer.md index 94923b035..ea2e36ce1 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -51,10 +51,10 @@ If necessary, the Freqtrade team can assist and give guidance with writing good #### How to run tests -Use `py.test` in root folder to run all available testcases and confirm your local environment is setup correctly +Use `pytest` in root folder to run all available testcases and confirm your local environment is setup correctly -!!! Note "develop branch" - This assumes that you have `stable` branch checked out. Other branches may be work in progress with tests not working yet. +!!! Note "feature branches" + Tests are expected to pass on the `develop` and `stable` branches. Other branches may be work in progress with tests not working yet. #### Checking log content in tests From a02da08065a305ed822c5795befff39afdbc97c3 Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 22:23:00 +0300 Subject: [PATCH 211/232] Fix typo --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index ae3f67ed1..226c02919 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead the user need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. 
The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: From 67d94692774eade7cb25c9ddb22ba81a5ce65ee0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 20:42:04 +0100 Subject: [PATCH 212/232] small wording fix --- docs/freqai-reinforcement-learning.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 226c02919..d690c7645 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -24,7 +24,7 @@ The framework is built on stable_baselines3 (torch) and OpenAI gym for the base ### Important considerations -As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. +As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL training environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks like `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. ## Running Reinforcement Learning @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the user need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead it is necessary to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. 
The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: From 64d4a52a5615ff9d5ddc2be693d8a79c002d0c9f Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Sun, 27 Nov 2022 20:43:50 +0100 Subject: [PATCH 213/232] Improve the RL learning process Improve the RL learning process by selecting random start point for the agent, it can help to block the agent to only learn on the selected period of time, while improving the quality of the model. --- freqtrade/freqai/RL/BaseEnvironment.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 3332e5a18..5d881ba32 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -9,6 +9,7 @@ import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame +import random from freqtrade.data.dataprovider import DataProvider @@ -121,6 +122,9 @@ class BaseEnvironment(gym.Env): self._done = False if self.starting_point is True: + length_of_data = int(self._end_tick/4) + start_tick = random.randint(self.window_size+1, length_of_data) + self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] From 25e041b98eabd62513d4c4494ed9e2b12100dd6e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 20:50:03 +0100 Subject: [PATCH 214/232] sneak in small change to FreqaiExampleHybridStrategy docstring and startup count --- freqtrade/templates/FreqaiExampleHybridStrategy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/templates/FreqaiExampleHybridStrategy.py b/freqtrade/templates/FreqaiExampleHybridStrategy.py index 26335956f..9d1842cd7 100644 --- a/freqtrade/templates/FreqaiExampleHybridStrategy.py +++ b/freqtrade/templates/FreqaiExampleHybridStrategy.py @@ -19,7 +19,7 @@ class FreqaiExampleHybridStrategy(IStrategy): Launching this strategy would be: - freqtrade trade --strategy FreqaiExampleHyridStrategy --strategy-path freqtrade/templates + freqtrade trade --strategy FreqaiExampleHybridStrategy --strategy-path freqtrade/templates --freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json or the user simply adds this to their config: @@ -86,7 +86,7 @@ class FreqaiExampleHybridStrategy(IStrategy): process_only_new_candles = True stoploss = -0.05 use_exit_signal = True - startup_candle_count: int = 300 + startup_candle_count: int = 30 can_short = True # Hyperoptable parameters From 7fd6bc526e38537a8595abcbe562af6ac6f53729 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 21:03:13 +0100 Subject: [PATCH 215/232] add randomize_starting_position to the rl_config --- docs/freqai-parameter-table.md | 1 + freqtrade/constants.py | 1 + freqtrade/freqai/RL/BaseEnvironment.py | 7 ++++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 02426ec13..f2a52a9b8 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -82,6 +82,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
**Datatype:** int.
| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting. <br> **Datatype:** bool. <br> Default: `False`.
| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[, dict(vf=[], pi=[])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
+| `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool.
Default: `False`. ### Additional parameters diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 878c38929..d869b89f6 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -591,6 +591,7 @@ CONF_SCHEMA = { "model_type": {"type": "string", "default": "PPO"}, "policy_type": {"type": "string", "default": "MlpPolicy"}, "net_arch": {"type": "array", "default": [128, 128]}, + "randomize_startinng_position": {"type": "boolean", "default": False}, "model_reward_parameters": { "type": "object", "properties": { diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 5d881ba32..8f940dd1b 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -122,9 +122,10 @@ class BaseEnvironment(gym.Env): self._done = False if self.starting_point is True: - length_of_data = int(self._end_tick/4) - start_tick = random.randint(self.window_size+1, length_of_data) - self._start_tick = start_tick + if self.rl_config.get('randomize_starting_position', False): + length_of_data = int(self._end_tick / 4) + start_tick = random.randint(self.window_size + 1, length_of_data) + self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] From 56518def42fab1fd3d89f12bcda281a1eff11ef7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 21:06:01 +0100 Subject: [PATCH 216/232] isort --- freqtrade/freqai/RL/BaseEnvironment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 8f940dd1b..66bdb8435 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -1,4 +1,5 @@ import logging +import random from abc import abstractmethod from enum import Enum from typing import Optional @@ -9,7 +10,6 @@ import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame -import random from freqtrade.data.dataprovider import DataProvider From f21dbbd8bb54a42203db28d28b017036e5e62d65 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 28 Nov 2022 00:06:02 +0300 Subject: [PATCH 217/232] Update imports of custom model --- docs/freqai-reinforcement-learning.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index d690c7645..353d7a2cc 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -166,7 +166,8 @@ As you begin to modify the strategy and the prediction model, you will quickly r ```python from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner - from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv + from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions + class MyCoolRLModel(ReinforcementLearner): """ From 49e41925b01bfd4f66de4893afaa399f4347e829 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:42 +0000 Subject: [PATCH 218/232] Bump flake8 from 5.0.4 to 6.0.0 Bumps [flake8](https://github.com/pycqa/flake8) from 5.0.4 to 6.0.0. 
- [Release notes](https://github.com/pycqa/flake8/releases) - [Commits](https://github.com/pycqa/flake8/compare/5.0.4...6.0.0) --- updated-dependencies: - dependency-name: flake8 dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index b46c244b5..ffce3d696 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,7 +7,7 @@ -r docs/requirements-docs.txt coveralls==3.3.1 -flake8==5.0.4 +flake8==6.0.0 flake8-tidy-imports==4.8.0 mypy==0.991 pre-commit==2.20.0 From 7e75bc8fcf40e8f250e6c0cd082b87051c081d3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:48 +0000 Subject: [PATCH 219/232] Bump sb3-contrib from 1.6.1 to 1.6.2 Bumps [sb3-contrib](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib) from 1.6.1 to 1.6.2. - [Release notes](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib/releases) - [Commits](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib/compare/v1.6.1...v1.6.2) --- updated-dependencies: - dependency-name: sb3-contrib dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index b6bd7ef15..2a0a04455 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -5,4 +5,4 @@ torch==1.12.1 stable-baselines3==1.6.1 gym==0.21 -sb3-contrib==1.6.1 +sb3-contrib==1.6.2 From 5aec51a16c37d44993e36d5cacfd8c01d464a93b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:55 +0000 Subject: [PATCH 220/232] Bump urllib3 from 1.26.12 to 1.26.13 Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.12 to 1.26.13. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/1.26.13/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.12...1.26.13) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..881ae04ae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ python-telegram-bot==13.14 arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 -urllib3==1.26.12 +urllib3==1.26.13 jsonschema==4.17.0 TA-Lib==0.4.25 technical==1.3.0 From 924bbad199a0d65147d6208ae9e3d20136bbab9e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:58 +0000 Subject: [PATCH 221/232] Bump pyarrow from 10.0.0 to 10.0.1 Bumps [pyarrow](https://github.com/apache/arrow) from 10.0.0 to 10.0.1. - [Release notes](https://github.com/apache/arrow/releases) - [Commits](https://github.com/apache/arrow/compare/go/v10.0.0...go/v10.0.1) --- updated-dependencies: - dependency-name: pyarrow dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..cde6b0344 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,7 +22,7 @@ jinja2==3.1.2 tables==3.7.0 blosc==1.10.6 joblib==1.2.0 -pyarrow==10.0.0; platform_machine != 'armv7l' +pyarrow==10.0.1; platform_machine != 'armv7l' # find first, C search in arrays py_find_1st==1.1.5 From a46b09d400ec78d0b278c14212728df6b6c46345 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:01:01 +0000 Subject: [PATCH 222/232] Bump prompt-toolkit from 3.0.32 to 3.0.33 Bumps [prompt-toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit) from 3.0.32 to 3.0.33. - [Release notes](https://github.com/prompt-toolkit/python-prompt-toolkit/releases) - [Changelog](https://github.com/prompt-toolkit/python-prompt-toolkit/blob/master/CHANGELOG) - [Commits](https://github.com/prompt-toolkit/python-prompt-toolkit/compare/3.0.32...3.0.33) --- updated-dependencies: - dependency-name: prompt-toolkit dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..f598d2377 100644 --- a/requirements.txt +++ b/requirements.txt @@ -47,7 +47,7 @@ psutil==5.9.4 colorama==0.4.6 # Building config files interactively questionary==1.10.0 -prompt-toolkit==3.0.32 +prompt-toolkit==3.0.33 # Extensions to datetime library python-dateutil==2.8.2 From 348731598e633d23576f0b89f69423021c396c5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:01:25 +0000 Subject: [PATCH 223/232] Bump ccxt from 2.1.96 to 2.2.36 Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.1.96 to 2.2.36. - [Release notes](https://github.com/ccxt/ccxt/releases) - [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg) - [Commits](https://github.com/ccxt/ccxt/compare/2.1.96...2.2.36) --- updated-dependencies: - dependency-name: ccxt dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..2cb829c3d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.23.5 pandas==1.5.1 pandas-ta==0.3.14b -ccxt==2.1.96 +ccxt==2.2.36 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.3; platform_machine != 'armv7l' From 9c28cc810d4ee384773d02481226686b2cbc9715 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 05:33:45 +0000 Subject: [PATCH 224/232] Bump cryptography from 38.0.1 to 38.0.4 Bumps [cryptography](https://github.com/pyca/cryptography) from 38.0.1 to 38.0.4. - [Release notes](https://github.com/pyca/cryptography/releases) - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/38.0.1...38.0.4) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2cb829c3d..2e5293cf6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ pandas-ta==0.3.14b ccxt==2.2.36 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' -cryptography==38.0.3; platform_machine != 'armv7l' +cryptography==38.0.4; platform_machine != 'armv7l' aiohttp==3.8.3 SQLAlchemy==1.4.44 python-telegram-bot==13.14 From d73fd42769298721c3a2306540263d74c9172ed9 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 06:38:35 +0100 Subject: [PATCH 225/232] Fix flake8 error introduced with 6.0 update --- freqtrade/persistence/pairlock_middleware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/persistence/pairlock_middleware.py b/freqtrade/persistence/pairlock_middleware.py index ec57e91fc..69d8b098b 100644 --- a/freqtrade/persistence/pairlock_middleware.py +++ b/freqtrade/persistence/pairlock_middleware.py @@ -87,7 +87,7 @@ class PairLocks(): Get the lock that expires the latest for the pair given. """ locks = PairLocks.get_pair_locks(pair, now, side=side) - locks = sorted(locks, key=lambda l: l.lock_end_time, reverse=True) + locks = sorted(locks, key=lambda lock: lock.lock_end_time, reverse=True) return locks[0] if locks else None @staticmethod From dc03317cc89c4a75359c866bf9673e9305bde0f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 07:02:54 +0000 Subject: [PATCH 226/232] Bump jsonschema from 4.17.0 to 4.17.1 Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.17.0 to 4.17.1. - [Release notes](https://github.com/python-jsonschema/jsonschema/releases) - [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst) - [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.17.0...v4.17.1) --- updated-dependencies: - dependency-name: jsonschema dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cc38bfc96..9ae85ac89 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 urllib3==1.26.13 -jsonschema==4.17.0 +jsonschema==4.17.1 TA-Lib==0.4.25 technical==1.3.0 tabulate==0.9.0 From 9880e9ab600832f6479bedeafbbce267ba92c6e3 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Mon, 28 Nov 2022 17:10:17 +0900 Subject: [PATCH 227/232] Fix typo in strategy_analysis_example.md seperate -> separate --- docs/strategy_analysis_example.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/strategy_analysis_example.md b/docs/strategy_analysis_example.md index 1526ea038..bae4a9108 100644 --- a/docs/strategy_analysis_example.md +++ b/docs/strategy_analysis_example.md @@ -232,7 +232,7 @@ graph = generate_candlestick_graph(pair=pair, # Show graph inline # graph.show() -# Render graph in a seperate window +# Render graph in a separate window graph.show(renderer="browser") ``` From 9cbfa1201113afeb143fb22b3b9ee4be125c5263 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 28 Nov 2022 16:02:17 +0300 Subject: [PATCH 228/232] Directly set model_type in base RL model --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 1 + freqtrade/freqai/data_drawer.py | 7 +------ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 709ded048..e1381ab62 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -64,6 +64,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() self.net_arch = self.rl_config.get('net_arch', [128, 128]) + self.dd.model_type = "stable_baselines" def unset_outlier_removal(self): """ diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 3b9352efe..ab41240e9 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -99,12 +99,7 @@ class FreqaiDataDrawer: self.empty_pair_dict: pair_info = { "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} - if 'Reinforcement' in self.config['freqaimodel']: - self.model_type = 'stable_baselines' - logger.warning('User passed a ReinforcementLearner model, FreqAI will ' - 'now use stable_baselines3 to save models.') - else: - self.model_type = self.freqai_info.get('model_save_type', 'joblib') + self.model_type = self.freqai_info.get('model_save_type', 'joblib') def update_metric_tracker(self, metric: str, value: float, pair: str) -> None: """ From e891c41760e38b41d57639933c1f986b6d8abcc3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 18:20:30 +0100 Subject: [PATCH 229/232] Fix typo in ipynb, too. 
--- freqtrade/templates/strategy_analysis_example.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/templates/strategy_analysis_example.ipynb b/freqtrade/templates/strategy_analysis_example.ipynb index 77444a023..5fb14ab2f 100644 --- a/freqtrade/templates/strategy_analysis_example.ipynb +++ b/freqtrade/templates/strategy_analysis_example.ipynb @@ -328,7 +328,7 @@ "# Show graph inline\n", "# graph.show()\n", "\n", - "# Render graph in a seperate window\n", + "# Render graph in a separate window\n", "graph.show(renderer=\"browser\")\n" ] }, From 8efa8bc78a445067637f51cbd952b2e55552831a Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 19:35:17 +0100 Subject: [PATCH 230/232] Update stable-baselines3 to 1.6.2 --- requirements-freqai-rl.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index 2a0a04455..df541c701 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -3,6 +3,7 @@ # Required for freqai-rl torch==1.12.1 -stable-baselines3==1.6.1 -gym==0.21 +stable-baselines3==1.6.2 sb3-contrib==1.6.2 +# Gym is forced to this version by stable-baselines3. +gym==0.21 From 2c75b5e027d137b55904d78aeadc0063291b876a Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 13:26:27 +0000 Subject: [PATCH 231/232] Extract "live" test from regular tests --- .github/workflows/ci.yml | 55 +++++++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0d5a7540d..334f7bec3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,12 +66,6 @@ jobs: - name: Tests run: | pytest --random-order --cov=freqtrade --cov-config=.coveragerc - if: matrix.python-version != '3.9' || matrix.os != 'ubuntu-22.04' - - - name: Tests incl. ccxt compatibility tests - run: | - pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun - if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-22.04' - name: Coveralls if: (runner.os == 'Linux' && matrix.python-version == '3.10' && matrix.os == 'ubuntu-22.04') @@ -310,9 +304,56 @@ jobs: details: Freqtrade doc test failed! webhookUrl: ${{ secrets.DISCORD_WEBHOOK }} + + build_linux_online: + # Run pytest with "live" checks + runs-on: ubuntu-22.04 + # permissions: + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.9" + + - name: Cache_dependencies + uses: actions/cache@v3 + id: cache + with: + path: ~/dependencies/ + key: ${{ runner.os }}-dependencies + + - name: pip cache (linux) + uses: actions/cache@v3 + if: runner.os == 'Linux' + with: + path: ~/.cache/pip + key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip + + - name: TA binary *nix + if: steps.cache.outputs.cache-hit != 'true' + run: | + cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd .. + + - name: Installation - *nix + if: runner.os == 'Linux' + run: | + python -m pip install --upgrade pip wheel + export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH + export TA_LIBRARY_PATH=${HOME}/dependencies/lib + export TA_INCLUDE_PATH=${HOME}/dependencies/include + pip install -r requirements-dev.txt + pip install -e . + + - name: Tests incl. 
ccxt compatibility tests + run: | + pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun + + # Notify only once - when CI completes (and after deploy) in case it's successfull notify-complete: - needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ] + needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit, build_linux_online ] runs-on: ubuntu-22.04 # Discord notification can't handle schedule events if: (github.event_name != 'schedule') From 5500c10f7853eeb09c08c59490807f2fb9df217f Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 19:40:43 +0100 Subject: [PATCH 232/232] Improve CI file layout --- .github/workflows/ci.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 334f7bec3..e730d1489 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -353,7 +353,15 @@ jobs: # Notify only once - when CI completes (and after deploy) in case it's successfull notify-complete: - needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit, build_linux_online ] + needs: [ + build_linux, + build_macos, + build_windows, + docs_check, + mypy_version_check, + pre-commit, + build_linux_online + ] runs-on: ubuntu-22.04 # Discord notification can't handle schedule events if: (github.event_name != 'schedule')
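For context on the `--longrun` flag used by the new `build_linux_online` job: such a flag is commonly wired up in `conftest.py` with a custom marker, roughly as sketched below. This is the generic pytest pattern, not a copy of freqtrade's actual `conftest.py`, which may implement it differently.

```python
# conftest.py - hypothetical sketch of gating slow tests behind a --longrun flag.
import pytest


def pytest_addoption(parser):
    parser.addoption("--longrun", action="store_true", default=False,
                     help="also run long-running tests (e.g. ccxt compatibility checks)")


def pytest_configure(config):
    config.addinivalue_line("markers", "longrun: mark a test as long-running")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--longrun"):
        # --longrun given on the command line: run everything.
        return
    skip_longrun = pytest.mark.skip(reason="need --longrun option to run")
    for item in items:
        if "longrun" in item.keywords:
            item.add_marker(skip_longrun)
```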