2018-02-04 07:33:54 +00:00
|
|
|
"""
|
|
|
|
Various tool functions for Freqtrade and scripts
|
|
|
|
"""
|
2018-07-04 07:31:35 +00:00
|
|
|
import gzip
|
2017-11-08 21:43:47 +00:00
|
|
|
import logging
|
2018-03-17 21:44:47 +00:00
|
|
|
import re
|
2018-01-21 12:44:30 +00:00
|
|
|
from datetime import datetime
|
2018-03-17 21:12:42 +00:00
|
|
|
from typing import Dict
|
|
|
|
|
2018-01-21 12:44:30 +00:00
|
|
|
import numpy as np
|
2018-03-17 21:12:42 +00:00
|
|
|
from pandas import DataFrame
|
2018-12-28 09:01:16 +00:00
|
|
|
import rapidjson
|
2017-11-08 21:43:47 +00:00
|
|
|
|
2019-04-09 09:27:35 +00:00
|
|
|
|
2017-11-11 15:47:19 +00:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
2017-09-08 22:31:40 +00:00
|
|
|
|
2018-03-17 21:12:42 +00:00
|
|
|
def shorten_date(_date: str) -> str:
    """
    Trim the date so it fits on small screens.

    Abbreviates human-readable duration words (seconds/minutes/hours/days)
    and turns a leading "a"/"an" into "1".
    :param _date: human-readable date string (e.g. "an hour ago")
    :return: shortened date string (e.g. "1 h ago")
    """
    # Ordered (pattern, replacement) pairs — the leading-article rule must
    # run last so "a minute" has already become "a min" before "a" -> "1".
    substitutions = (
        ('seconds?', 'sec'),
        ('minutes?', 'min'),
        ('hours?', 'h'),
        ('days?', 'd'),
        ('^an?', '1'),
    )
    shortened = _date
    for pattern, replacement in substitutions:
        shortened = re.sub(pattern, replacement, shortened)
    return shortened
|
2017-09-08 22:31:40 +00:00
|
|
|
|
|
|
|
|
2018-01-21 12:44:30 +00:00
|
|
|
############################################
|
|
|
|
# Used by scripts #
|
|
|
|
# Matplotlib doesn't support ::datetime64, #
|
|
|
|
# so we need to convert it into ::datetime #
|
|
|
|
############################################
|
2018-03-17 21:12:42 +00:00
|
|
|
def datesarray_to_datetimearray(dates: np.ndarray) -> np.ndarray:
    """
    Convert a pandas array of timestamps into a numpy array of datetimes.

    NOTE(review): despite the annotation, the `.dt` accessor requires a
    pandas Series of datetime64 values — confirm callers pass a Series.
    :return: numpy-array of datetime
    """
    pydatetimes = dates.dt.to_pydatetime()
    return pydatetimes
|
2018-01-21 12:44:30 +00:00
|
|
|
|
|
|
|
|
2018-03-17 21:12:42 +00:00
|
|
|
def common_datearray(dfs: Dict[str, DataFrame]) -> np.ndarray:
    """
    Return the sorted union of all dates found in the given dataframes.

    :param dfs: Dict with format pair: pair_data
    :return: sorted numpy array of unique dates
    """
    # Idiom fix: the original used a dict with dummy `1` values as a set
    # plus a manual append loop; a real set and sorted() produce the same
    # deduplicated, ascending result directly.
    alldates = set()
    for pair_data in dfs.values():
        alldates.update(datesarray_to_datetimearray(pair_data['date']))
    return np.array(sorted(alldates))
|
|
|
|
|
|
|
|
|
2018-03-30 21:30:23 +00:00
|
|
|
def file_dump_json(filename, data, is_zip=False) -> None:
    """
    Dump JSON data into a file
    :param filename: file to create
    :param data: JSON Data to save
    :param is_zip: if True, write a gzip-compressed file (appending '.gz' if missing)
    :return:
    """
    # Fixed: both f-strings below contained no placeholder and always logged
    # the literal text '(unknown)' instead of the target filename.
    logger.info(f'dumping json to "{filename}"')

    if is_zip:
        if not filename.endswith('.gz'):
            filename = filename + '.gz'
        # NOTE(review): gzip.open() with mode 'w' yields a *binary* stream;
        # confirm rapidjson.dump() accepts it — otherwise this needs 'wt'.
        with gzip.open(filename, 'w') as fp:
            rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)
    else:
        with open(filename, 'w') as fp:
            rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)

    logger.debug(f'done json to "{filename}"')
|
|
|
|
|
2018-03-25 11:38:17 +00:00
|
|
|
|
2018-12-28 09:25:12 +00:00
|
|
|
def json_load(datafile):
    """
    Load JSON data from an open file handle with rapidjson.

    Use this to have a consistent experience: number_mode is pinned to
    "NM_NATIVE" for greatest speed.
    :param datafile: open file-like object to read JSON from
    :return: the deserialized data
    """
    number_mode = rapidjson.NM_NATIVE
    return rapidjson.load(datafile, number_mode=number_mode)
|
|
|
|
|
|
|
|
|
|
|
|
def file_load_json(file):
    """
    Load ticker data from a JSON file, preferring a gzipped variant.

    :param file: pathlib.Path to the plain .json file; a sibling with an
                 extra '.gz' suffix is checked first.
    :return: parsed JSON data, or None if neither file exists
    """
    gzipfile = file.with_suffix(file.suffix + '.gz')

    # Try gzip file first, otherwise regular json file.
    if gzipfile.is_file():
        logger.debug('Loading ticker data from file %s', gzipfile)
        with gzip.open(gzipfile) as tickerdata:
            return json_load(tickerdata)

    if file.is_file():
        logger.debug('Loading ticker data from file %s', file)
        with open(file) as tickerdata:
            return json_load(tickerdata)

    return None
|
2018-12-28 09:04:28 +00:00
|
|
|
|
|
|
|
|
2018-05-30 20:38:09 +00:00
|
|
|
def format_ms_time(date: int) -> str:
|
2018-03-25 11:38:17 +00:00
|
|
|
"""
|
|
|
|
convert MS date to readable format.
|
|
|
|
: epoch-string in ms
|
|
|
|
"""
|
|
|
|
return datetime.fromtimestamp(date/1000.0).strftime('%Y-%m-%dT%H:%M:%S')
|
2019-02-19 12:14:47 +00:00
|
|
|
|
|
|
|
|
|
|
|
def deep_merge_dicts(source, destination):
    """
    Recursively merge *source* into *destination*.

    Values from source override destination; destination is returned
    (and modified in place!).

    Sample:
    >>> a = { 'first' : { 'rows' : { 'pass' : 'dog', 'number' : '1' } } }
    >>> b = { 'first' : { 'rows' : { 'fail' : 'cat', 'number' : '5' } } }
    >>> deep_merge_dicts(b, a) == \
            { 'first' : { 'rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '5' } } }
    True
    """
    for key, src_value in source.items():
        if isinstance(src_value, dict):
            # Descend into the matching destination node, creating it on demand.
            deep_merge_dicts(src_value, destination.setdefault(key, {}))
        else:
            destination[key] = src_value

    return destination
|