From a74d44eddeb477fb84fe4e055afce6d115372dde Mon Sep 17 00:00:00 2001
From: Stefano Ariestasia
Date: Sat, 5 Nov 2022 14:58:36 +0900
Subject: [PATCH 1/5] optimize dataframe columns' type on backtest/HO

---
 freqtrade/data/converter.py | 47 +++++++++++++++++++++++++++++++++++--
 1 file changed, 45 insertions(+), 2 deletions(-)

diff --git a/freqtrade/data/converter.py b/freqtrade/data/converter.py
index 98ed15489..cf2cbdfd4 100644
--- a/freqtrade/data/converter.py
+++ b/freqtrade/data/converter.py
@@ -12,7 +12,7 @@ from pandas import DataFrame, to_datetime
 from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, Config, TradeList
 from freqtrade.enums import CandleType
-
+import numpy as np
 
 logger = logging.getLogger(__name__)
 
@@ -121,6 +121,45 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
         logger.debug(message)
     return df
 
+def reduce_mem_usage(pair: str, df: DataFrame) -> DataFrame:
+    """ iterate through all the columns of a dataframe and modify the data type
+        to reduce memory usage.
+    """
+    # start_mem = df.memory_usage().sum() / 1024**2
+    # logger.info(f"Memory usage of dataframe for {pair} is {start_mem:.2f} MB")
+
+    for col in df.columns[1:]:
+        col_type = df[col].dtype
+
+        if col_type != object:
+            c_min = df[col].min()
+            c_max = df[col].max()
+            if str(col_type)[:3] == "int":
+                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
+                    df[col] = df[col].astype(np.int8)
+                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
+                    df[col] = df[col].astype(np.int16)
+                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
+                    df[col] = df[col].astype(np.int32)
+                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
+                    df[col] = df[col].astype(np.int64)
+            elif str(col_type)[:5] == "float":
+                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
+                    df[col] = df[col].astype(np.float16)
+                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
+                    df[col] = df[col].astype(np.float32)
+                else:
+                    df[col] = df[col].astype(np.float64)
+            # else:
+            #     logger.info(f"Column not optimized because the type is {str(col_type)}")
+        # else:
+        #     df[col] = df[col].astype('category')
+
+    # end_mem = df.memory_usage().sum() / 1024**2
+    # logger.info("Memory usage after optimization is: {:.2f} MB".format(end_mem))
+    # logger.info("Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
+
+    return df
 
 def trim_dataframe(df: DataFrame, timerange, df_date_col: str = 'date',
                    startup_candles: int = 0) -> DataFrame:
     """
@@ -155,10 +194,14 @@ def trim_dataframes(preprocessed: Dict[str, DataFrame], timerange,
     :return: Dict of trimmed dataframes
     """
     processed: Dict[str, DataFrame] = {}
-
     for pair, df in preprocessed.items():
         trimed_df = trim_dataframe(df, timerange, startup_candles=startup_candles)
         if not trimed_df.empty:
+            # start_mem = trimed_df.memory_usage().sum() / 1024**2
+            # logger.info(f"Memory usage of dataframe for {pair} before reduced is {start_mem:.2f} MB")
+            trimed_df = reduce_mem_usage(pair, trimed_df)
+            # end_mem = trimed_df.memory_usage().sum() / 1024**2
+            # logger.info(f"Memory usage of dataframe for {pair} after reduced is {end_mem:.2f} MB")
             processed[pair] = trimed_df
         else:
             logger.warning(f'{pair} has no data left after adjusting for startup candles, '

From 404df7ae2061a66b72f3df19dfbaad56eed29f72 Mon Sep 17 00:00:00 2001
From: root
Date: Thu, 8 Dec 2022 20:02:44 +0900
Subject: [PATCH 2/5] fix isort

---
 freqtrade/data/converter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/freqtrade/data/converter.py b/freqtrade/data/converter.py
index 290dea711..46bf648b8 100644
--- a/freqtrade/data/converter.py
+++ b/freqtrade/data/converter.py
@@ -12,7 +12,7 @@ from pandas import DataFrame, to_datetime
 from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, Config, TradeList
 from freqtrade.enums import CandleType
-import numpy as np
+
 
 logger = logging.getLogger(__name__)

From 3b9052247f9b14c13f3993b485ec23d0289a6fbf Mon Sep 17 00:00:00 2001
From: Stefano Ariestasia
Date: Thu, 8 Dec 2022 20:06:02 +0900
Subject: [PATCH 3/5] flake8 fix

---
 freqtrade/data/converter.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/freqtrade/data/converter.py b/freqtrade/data/converter.py
index 46bf648b8..531a69647 100644
--- a/freqtrade/data/converter.py
+++ b/freqtrade/data/converter.py
@@ -121,16 +121,17 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
         logger.debug(message)
     return df
 
+
 def reduce_mem_usage(pair: str, df: DataFrame) -> DataFrame:
     """ iterate through all the columns of a dataframe and modify the data type
-        to reduce memory usage.
+        to reduce memory usage.
     """
     # start_mem = df.memory_usage().sum() / 1024**2
     # logger.info(f"Memory usage of dataframe for {pair} is {start_mem:.2f} MB")
-
+
     for col in df.columns[1:]:
         col_type = df[col].dtype
-
+
         if col_type != object:
             c_min = df[col].min()
             c_max = df[col].max()
             if str(col_type)[:3] == "int":
@@ -142,7 +143,7 @@ def reduce_mem_usage(pair: str, df: DataFrame) -> DataFrame:
             elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                 df[col] = df[col].astype(np.int32)
             elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
-                df[col] = df[col].astype(np.int64) 
+                df[col] = df[col].astype(np.int64)
             elif str(col_type)[:5] == "float":
                 if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                     df[col] = df[col].astype(np.float16)
@@ -158,9 +159,10 @@ def reduce_mem_usage(pair: str, df: DataFrame) -> DataFrame:
     # end_mem = df.memory_usage().sum() / 1024**2
     # logger.info("Memory usage after optimization is: {:.2f} MB".format(end_mem))
     # logger.info("Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
-
+
     return df
 
+
 def trim_dataframe(df: DataFrame, timerange, df_date_col: str = 'date',
                    startup_candles: int = 0) -> DataFrame:
     """
@@ -196,10 +198,10 @@ def trim_dataframes(preprocessed: Dict[str, DataFrame], timerange,
         trimed_df = trim_dataframe(df, timerange, startup_candles=startup_candles)
         if not trimed_df.empty:
             # start_mem = trimed_df.memory_usage().sum() / 1024**2
-            # logger.info(f"Memory usage of dataframe for {pair} before reduced is {start_mem:.2f} MB")
+            # logger.info(f"Memory usage of df for {pair} before reduced is {start_mem:.2f} MB")
             trimed_df = reduce_mem_usage(pair, trimed_df)
             # end_mem = trimed_df.memory_usage().sum() / 1024**2
-            # logger.info(f"Memory usage of dataframe for {pair} after reduced is {end_mem:.2f} MB")
+            # logger.info(f"Memory usage of df for {pair} after reduced is {end_mem:.2f} MB")
             processed[pair] = trimed_df
         else:
             logger.warning(f'{pair} has no data left after adjusting for startup candles, '

From b2791836a42627f252a42f2feb060a8db882380f Mon Sep 17 00:00:00 2001
From: Stefano Ariestasia
Date: Mon, 9 Jan 2023 10:44:48 +0900
Subject: [PATCH 4/5] limit type to 32

---
 freqtrade/data/converter.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/freqtrade/data/converter.py b/freqtrade/data/converter.py
index 531a69647..03393905d 100644
--- a/freqtrade/data/converter.py
+++ b/freqtrade/data/converter.py
@@ -136,18 +136,18 @@ def reduce_mem_usage(pair: str, df: DataFrame) -> DataFrame:
             c_min = df[col].min()
             c_max = df[col].max()
             if str(col_type)[:3] == "int":
-                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
-                    df[col] = df[col].astype(np.int8)
-                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
-                    df[col] = df[col].astype(np.int16)
-                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
+                # if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
+                #     df[col] = df[col].astype(np.int8)
+                # elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
+                #     df[col] = df[col].astype(np.int16)
+                if c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                     df[col] = df[col].astype(np.int32)
                 elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                     df[col] = df[col].astype(np.int64)
             elif str(col_type)[:5] == "float":
-                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
-                    df[col] = df[col].astype(np.float16)
-                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
+                # if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
+                #     df[col] = df[col].astype(np.float16)
+                if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                     df[col] = df[col].astype(np.float32)
                 else:
                     df[col] = df[col].astype(np.float64)

From 5372e299c002e446d5b99d4902f5a689d18975e6 Mon Sep 17 00:00:00 2001
From: Stefano Ariestasia
Date: Mon, 9 Jan 2023 12:33:27 +0900
Subject: [PATCH 5/5] fix typo

---
 freqtrade/data/converter.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/freqtrade/data/converter.py b/freqtrade/data/converter.py
index 03393905d..e778c99d7 100644
--- a/freqtrade/data/converter.py
+++ b/freqtrade/data/converter.py
@@ -195,14 +195,14 @@ def trim_dataframes(preprocessed: Dict[str, DataFrame], timerange,
     """
     processed: Dict[str, DataFrame] = {}
     for pair, df in preprocessed.items():
-        trimed_df = trim_dataframe(df, timerange, startup_candles=startup_candles)
-        if not trimed_df.empty:
-            # start_mem = trimed_df.memory_usage().sum() / 1024**2
+        trimmed_df = trim_dataframe(df, timerange, startup_candles=startup_candles)
+        if not trimmed_df.empty:
+            # start_mem = trimmed_df.memory_usage().sum() / 1024**2
             # logger.info(f"Memory usage of df for {pair} before reduced is {start_mem:.2f} MB")
-            trimed_df = reduce_mem_usage(pair, trimed_df)
-            # end_mem = trimed_df.memory_usage().sum() / 1024**2
+            trimmed_df = reduce_mem_usage(pair, trimmed_df)
+            # end_mem = trimmed_df.memory_usage().sum() / 1024**2
             # logger.info(f"Memory usage of df for {pair} after reduced is {end_mem:.2f} MB")
-            processed[pair] = trimed_df
+            processed[pair] = trimmed_df
         else:
             logger.warning(f'{pair} has no data left after adjusting for startup candles, '
                            f'skipping.')
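
For reference, here is a minimal standalone sketch of the float-downcasting idea these patches apply in reduce_mem_usage(), so the memory effect can be checked outside freqtrade. The downcast_floats helper and the dummy OHLCV frame below are illustrative assumptions, not code from this series.

import numpy as np
import pandas as pd


def downcast_floats(df: pd.DataFrame) -> pd.DataFrame:
    # Same range check as in the patches: only downcast a float column
    # when all of its values fit inside float32.
    for col in df.columns:
        if pd.api.types.is_float_dtype(df[col]):
            c_min, c_max = df[col].min(), df[col].max()
            if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                df[col] = df[col].astype(np.float32)
    return df


# Dummy OHLCV-shaped frame: 50_000 candles of random float64 data.
n = 50_000
ohlcv = pd.DataFrame({
    "date": pd.date_range("2022-01-01", periods=n, freq="5min"),
    "open": np.random.rand(n) * 100,
    "high": np.random.rand(n) * 100,
    "low": np.random.rand(n) * 100,
    "close": np.random.rand(n) * 100,
    "volume": np.random.rand(n) * 1000,
})

before = ohlcv.memory_usage().sum() / 1024
ohlcv = downcast_floats(ohlcv)
after = ohlcv.memory_usage().sum() / 1024
# Each float column shrinks from 8 to 4 bytes per value.
print(f"{before:.0f} KiB -> {after:.0f} KiB")

The trade-off is precision: float32 keeps roughly 7 significant decimal digits and float16 only about 3, which is presumably why PATCH 4 drops the float16/int8/int16 branches and limits the downcast to 32-bit types.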