reduce mlp number of parameters for testing

This commit is contained in:
Yinon Polak 2023-03-13 20:09:12 +02:00
parent 9c8c30b0e8
commit 918889a2bd
2 changed files with 2 additions and 4 deletions

View File

@@ -35,7 +35,6 @@ class PyTorchClassifierMultiTarget(BasePyTorchModel):
         super().__init__(**kwargs)
         model_training_params = self.freqai_info.get("model_training_parameters", {})
-        self.n_hidden: int = model_training_params.get("n_hidden", 1024)
         self.max_iters: int = model_training_params.get("max_iters", 100)
         self.batch_size: int = model_training_params.get("batch_size", 64)
         self.learning_rate: float = model_training_params.get("learning_rate", 3e-4)

View File

@@ -89,13 +89,12 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
     if 'PyTorchClassifierMultiTarget' in model:
         model_save_ext = 'zip'
         freqai_conf['freqai']['model_training_parameters'].update({
-            "n_hidden": 1024,
-            "max_iters": 100,
+            "max_iters": 1,
             "batch_size": 64,
             "learning_rate": 3e-4,
             "max_n_eval_batches": None,
             "model_kwargs": {
-                "hidden_dim": 1024,
+                "hidden_dim": 32,
                 "dropout_percent": 0.2,
                 "n_layer": 1,
             }