add doc for single precision, don't allow half precision, add test

robcaulk 2022-11-04 18:10:04 +01:00
parent 3ccc120f92
commit 257c833831
3 changed files with 10 additions and 19 deletions

View File

@@ -18,6 +18,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
| `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. <br> Default: `False`.
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
| `convert_df_to_float32` | Recast all numeric columns to float32, with the objective of reducing RAM/disk usage and decreasing train/inference time. <br> **Datatype:** Boolean. <br> Default: `False`.
| | **Feature parameters**
| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
| `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).
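For reference, `convert_df_to_float32` is a plain boolean inside the `freqai` section of the configuration. A minimal sketch of enabling it (all other keys omitted), mirroring how the test further down toggles the flag, could look like this:

```python
# Minimal sketch (not part of this diff): enabling the new option in a
# FreqAI configuration dict; every other freqai setting stays unchanged.
freqai_conf = {
    "freqai": {
        "convert_df_to_float32": True,  # recast numeric columns to float32
    }
}
```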

View File

@@ -1357,21 +1357,10 @@ class FreqaiDataKitchen:
        for col in df.columns[1:]:
            col_type = df[col].dtype
            if col_type != object:
                c_min = df[col].min()
                c_max = df[col].max()
                if str(col_type)[:3] == "int":
                    if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                        df[col] = df[col].astype(np.int8)
                    elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                        df[col] = df[col].astype(np.int16)
                    elif c_min > np.iinfo(np.int32).min:
                        df[col] = df[col].astype(np.int32)
                else:
                    if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                        df[col] = df[col].astype(np.float16)
                    elif c_min > np.finfo(np.float32).min:
                        df[col] = df[col].astype(np.float32)
        end_mem = df.memory_usage().sum() / 1024**2
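The lines that replace this block are not visible in the extracted hunk. Going by the commit title (single precision only, no half precision), the removed dtype branching above would give way to a straight float32 cast of the numeric columns. A minimal sketch of such a conversion, using a hypothetical helper name, might look like this:

```python
import numpy as np
import pandas as pd


def convert_to_float32(df: pd.DataFrame) -> pd.DataFrame:
    """Sketch of a float32-only conversion: numeric columns are downcast to
    single precision, never to float16 (half precision)."""
    num_cols = df.select_dtypes(include=[np.number]).columns
    df[num_cols] = df[num_cols].astype(np.float32)
    return df
```

Dropping the float16 branch avoids the precision loss that half precision introduces for indicator values, while float32 still roughly halves memory use compared to float64.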

View File

@@ -27,13 +27,13 @@ def is_mac() -> bool:
    return "Darwin" in machine

@pytest.mark.parametrize('model, pca, dbscan', [
    ('LightGBMRegressor', True, False),
    ('XGBoostRegressor', False, True),
    ('XGBoostRFRegressor', False, False),
    ('CatboostRegressor', False, False),
@pytest.mark.parametrize('model, pca, dbscan, float32', [
    ('LightGBMRegressor', True, False, True),
    ('XGBoostRegressor', False, True, False),
    ('XGBoostRFRegressor', False, False, False),
    ('CatboostRegressor', False, False, False),
    ])
def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan):
def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan, float32):
    if is_arm() and model == 'CatboostRegressor':
        pytest.skip("CatBoost is not supported on ARM")
@@ -43,6 +43,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
    freqai_conf.update({"strategy": "freqai_test_strat"})
    freqai_conf['freqai']['feature_parameters'].update({"principal_component_analysis": pca})
    freqai_conf['freqai']['feature_parameters'].update({"use_DBSCAN_to_remove_outliers": dbscan})
    freqai_conf['freqai'].update({"convert_df_to_float32": float32})
    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
    exchange = get_patched_exchange(mocker, freqai_conf)
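Not part of this diff, but as a hypothetical follow-up, the `float32` case could be verified by asserting that the processed dataframe ends up in single precision and never half precision, e.g.:

```python
import numpy as np


def assert_single_precision(df):
    """Hypothetical check: every numeric column is float32, none is float16."""
    for dtype in df.select_dtypes(include=[np.number]).dtypes:
        assert dtype == np.float32
        assert dtype != np.float16
```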