allow user to pass test_size = 0 and avoid using eval sets in prediction models
parent 55cf378ec2
commit 56b17e6f3c
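The gating added throughout this commit reads test_size from the freqai data_split_parameters section of the user configuration and falls back to 0.1 when the key is absent. A minimal sketch of that lookup, assuming a hypothetical config dict shaped like the self.config["freqai"]["data_split_parameters"] access in the diff (the shuffle key is purely illustrative):

    # Sketch only: config shape inferred from self.config["freqai"]["data_split_parameters"] in the diff.
    config = {
        "freqai": {
            "data_split_parameters": {
                "test_size": 0,    # 0 disables the train/test split and all eval sets downstream
                "shuffle": False,  # illustrative extra kwarg forwarded to train_test_split
            },
        },
    }

    # Same defaulting logic as the checks added in this commit: missing section or key -> 0.1.
    test_size = config["freqai"].get("data_split_parameters", {}).get("test_size", 0.1)
    use_eval_sets = test_size != 0
    print(f"test_size={test_size}, eval sets enabled: {use_eval_sets}")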
@@ -243,6 +243,7 @@ class FreqaiDataKitchen:
        else:
            stratification = None

        if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
            (
                train_features,
                test_features,
@@ -257,6 +258,13 @@ class FreqaiDataKitchen:
                stratify=stratification,
                **self.config["freqai"]["data_split_parameters"],
            )
        else:
            test_labels = np.zeros(2)
            test_features = pd.DataFrame()
            test_weights = np.zeros(2)
            train_features = filtered_dataframe
            train_labels = labels
            train_weights = weights

        return self.build_data_dictionary(
            train_features, test_features, train_labels, test_labels, train_weights, test_weights
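A standalone sketch of the branch above, assuming small illustrative arrays: when test_size is non-zero the data goes through sklearn's train_test_split as before, otherwise everything becomes training data and the test slots are filled with the same placeholders the diff uses (np.zeros(2) and an empty DataFrame).

    # Sketch of the make_train_test_datasets branch; data and parameters are illustrative.
    import numpy as np
    import pandas as pd
    from sklearn.model_selection import train_test_split

    filtered_dataframe = pd.DataFrame({"feat_a": range(10), "feat_b": range(10, 20)})
    labels = pd.DataFrame({"label": np.linspace(-1.0, 1.0, 10)})
    weights = np.ones(10)

    data_split_parameters = {"test_size": 0}  # set to e.g. 0.1 to re-enable the split

    if data_split_parameters.get("test_size", 0.1) != 0:
        (train_features, test_features,
         train_labels, test_labels,
         train_weights, test_weights) = train_test_split(
            filtered_dataframe, labels, weights, **data_split_parameters
        )
    else:
        # Placeholders mirroring the diff: no hold-out data at all.
        test_labels = np.zeros(2)
        test_features = pd.DataFrame()
        test_weights = np.zeros(2)
        train_features = filtered_dataframe
        train_labels = labels
        train_weights = weights

    print(len(train_features), len(test_features))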
@@ -392,6 +400,7 @@ class FreqaiDataKitchen:
                / (train_labels_max - train_labels_min)
                - 1
            )
            if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
                data_dictionary["test_labels"][item] = (
                    2
                    * (data_dictionary["test_labels"][item] - train_labels_min)
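The expression above is a standard min/max rescale of each label column to the [-1, 1] range using the training set's extremes; the added check simply skips the test-label pass when there is no test set. A small illustrative example with made-up numbers:

    # Illustrative only: rescale values to [-1, 1] using the *training* min/max,
    # i.e. 2 * (x - min) / (max - min) - 1 as in the diff.
    import numpy as np

    train_labels = np.array([10.0, 12.0, 14.0, 20.0])
    train_labels_min, train_labels_max = train_labels.min(), train_labels.max()

    normalized = 2 * (train_labels - train_labels_min) / (train_labels_max - train_labels_min) - 1
    print(normalized)  # [-1.  -0.6 -0.2  1. ]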
@@ -555,6 +564,7 @@ class FreqaiDataKitchen:
        self.data["training_features_list_raw"] = copy.deepcopy(self.training_features_list)
        self.training_features_list = self.data_dictionary["train_features"].columns

        if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
            self.data_dictionary["test_features"] = pd.DataFrame(
                data=test_components,
                columns=["PC" + str(i) for i in range(0, n_keep_components)],
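For reference, a sketch of how held-out features could be projected onto principal components and wrapped in a DataFrame with the same "PC0", "PC1", ... column naming as the diff. sklearn's PCA and the variable names here are illustrative stand-ins for the kitchen's own fitted PCA state:

    # Sketch only: project test features onto fitted components and label columns "PC<i>".
    import numpy as np
    import pandas as pd
    from sklearn.decomposition import PCA

    rng = np.random.default_rng(42)
    train_features = pd.DataFrame(rng.normal(size=(100, 5)))
    test_features = pd.DataFrame(rng.normal(size=(20, 5)))

    n_keep_components = 3
    pca = PCA(n_components=n_keep_components).fit(train_features)
    test_components = pca.transform(test_features)

    test_pcs = pd.DataFrame(
        data=test_components,
        columns=["PC" + str(i) for i in range(0, n_keep_components)],
        index=test_features.index,
    )
    print(test_pcs.shape)  # (20, 3)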
@@ -652,12 +662,14 @@ class FreqaiDataKitchen:
        )

        # same for test data
        if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
            y_pred = self.svm_model.predict(self.data_dictionary["test_features"])
            dropped_points = np.where(y_pred == -1, 0, y_pred)
            self.data_dictionary["test_features"] = self.data_dictionary["test_features"][
                (y_pred == 1)
            ]
            self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][(y_pred == 1)]
            self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][(
                y_pred == 1)]
            self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][
                (y_pred == 1)
            ]
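A condensed sketch of the guarded block above: the fitted SVM flags outliers with -1, and one shared boolean mask is applied to test features, labels, and weights so the three stay row-aligned. SGDOneClassSVM and the synthetic data are illustrative stand-ins for the kitchen's own self.svm_model and data_dictionary entries:

    # Sketch only: drop rows flagged as outliers (y_pred == -1) from features, labels and weights.
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import SGDOneClassSVM

    rng = np.random.default_rng(0)
    test_features = pd.DataFrame(rng.normal(size=(50, 4)))
    test_labels = pd.DataFrame({"label": rng.normal(size=50)})
    test_weights = np.ones(50)

    svm_model = SGDOneClassSVM(nu=0.1).fit(test_features)
    y_pred = svm_model.predict(test_features)            # 1 = inlier, -1 = outlier
    dropped_points = np.where(y_pred == -1, 0, y_pred)   # 0 marks dropped rows, as in the diff

    test_features = test_features[(y_pred == 1)]
    test_labels = test_labels[(y_pred == 1)]
    test_weights = test_weights[(y_pred == 1)]
    print(f"kept {dropped_points.sum()} of {len(y_pred)} test rows")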
@@ -28,7 +28,9 @@ class CatboostPredictionModel(BaseRegressionModel):
            label=data_dictionary["train_labels"],
            weight=data_dictionary["train_weights"],
        )

        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
            test_data = None
        else:
            test_data = Pool(
                data=data_dictionary["test_features"],
                label=data_dictionary["test_labels"],
@@ -39,6 +41,9 @@ class CatboostPredictionModel(BaseRegressionModel):
            allow_writing_files=False,
            **self.model_training_parameters,
        )

        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
            test_data = None
        model.fit(X=train_data, eval_set=test_data)

        return model
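The net effect in CatboostPredictionModel: when test_size is 0 the model is trained without an eval set, which works because CatBoostRegressor.fit accepts eval_set=None. A minimal sketch with synthetic data (iterations and verbose are illustrative parameters, not taken from the diff):

    # Sketch only: train CatBoost with or without an eval Pool, mirroring the guarded test_data above.
    import numpy as np
    from catboost import CatBoostRegressor, Pool

    rng = np.random.default_rng(1)
    X_train, y_train = rng.normal(size=(200, 6)), rng.normal(size=200)
    X_test, y_test = rng.normal(size=(50, 6)), rng.normal(size=50)

    test_size = 0  # as if data_split_parameters["test_size"] == 0

    train_data = Pool(data=X_train, label=y_train)
    test_data = None if test_size == 0 else Pool(data=X_test, label=y_test)

    model = CatBoostRegressor(iterations=50, allow_writing_files=False, verbose=False)
    model.fit(X=train_data, eval_set=test_data)  # eval_set=None simply disables evaluation
    print(model.predict(X_test[:3]))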
@@ -36,6 +36,8 @@ class CatboostPredictionMultiModel(BaseRegressionModel):

        model = MultiOutputRegressor(estimator=cbr)
        model.fit(X=X, y=y, sample_weight=sample_weight)  # , eval_set=eval_set)

        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
            train_score = model.score(X, y)
            test_score = model.score(*eval_set)
            logger.info(f"Train score {train_score}, Test score {test_score}")
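In the multi-target model the eval data is only used for scoring after the fit, so the guard just skips the log line when there is no test set. A sketch of that scoring pattern, assuming eval_set is an (X_test, y_test) pair as the *eval_set unpacking implies; the synthetic data and the eval_set-is-None check stand in for the test_size gating in the diff:

    # Sketch only: score a MultiOutputRegressor on train data and, if present, on an (X, y) eval pair.
    import logging
    import numpy as np
    from catboost import CatBoostRegressor
    from sklearn.multioutput import MultiOutputRegressor

    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)

    rng = np.random.default_rng(2)
    X, y = rng.normal(size=(200, 5)), rng.normal(size=(200, 2))      # two regression targets
    eval_set = (rng.normal(size=(40, 5)), rng.normal(size=(40, 2)))  # None when test_size == 0

    cbr = CatBoostRegressor(iterations=50, allow_writing_files=False, verbose=False)
    model = MultiOutputRegressor(estimator=cbr)
    model.fit(X=X, y=y)

    if eval_set is not None:
        train_score = model.score(X, y)
        test_score = model.score(*eval_set)  # unpacks to model.score(X_test, y_test)
        logger.info(f"Train score {train_score}, Test score {test_score}")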
@@ -25,11 +25,15 @@ class LightGBMPredictionModel(BaseRegressionModel):
        all the training and test data/labels.
        """

        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
            eval_set = None
        else:
            eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
        X = data_dictionary["train_features"]
        y = data_dictionary["train_labels"]

        model = LGBMRegressor(**self.model_training_parameters)

        model.fit(X=X, y=y, eval_set=eval_set)

        return model
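Same idea for LightGBM: LGBMRegressor.fit takes an optional eval_set and skips in-training evaluation entirely when it is None, so passing test_size = 0 just drops the hold-out data. A minimal sketch with synthetic data; the list-wrapped eval_set here follows the documented list-of-(X, y)-pairs form and differs slightly from the bare tuple used in the diff:

    # Sketch only: fit LightGBM with or without an eval set, mirroring the guard above.
    import numpy as np
    from lightgbm import LGBMRegressor

    rng = np.random.default_rng(3)
    X, y = rng.normal(size=(300, 8)), rng.normal(size=300)
    X_test, y_test = rng.normal(size=(60, 8)), rng.normal(size=60)

    test_size = 0  # as if data_split_parameters["test_size"] == 0
    eval_set = None if test_size == 0 else [(X_test, y_test)]

    model = LGBMRegressor(n_estimators=50)
    model.fit(X=X, y=y, eval_set=eval_set)  # eval_set=None -> no evaluation during training
    print(model.predict(X_test[:3]))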