Compare commits: feat/kvsto...develop (177 commits)
Commits (SHA1):
0afd5a7385, 2131205db6, b2b19915e6, bba6f8e133, a6d2233b95, 9857675a5e, 4ab047dfa7, 476ed938f5, 40ffac9de0, b892d373cd,
c3647e49ad, 37ed37dc76, 5cb688c112, 3e394d0612, c4c2298686, 8564dc10b2, 3fb892fcb8, 9968348324, fa293c54f8, 95449ca886,
70fa4a53cd, 467c63ff01, b8a9c200fe, 7c10af65a1, e2cd23b1d2, 0d408d3d43, 2309197771, 66fe9abce0, 200c18f3e4, 351b5f6e65,
605cc20a21, f73d2a5371, 485a074674, 865cf5232b, 95a24c3133, 6833059c70, 3833dc0b78, e0d3c771db, 5a18ab0784, 1d66f82b1d,
2e765fe6d1, 21ea02bbcf, 2ea0157197, 03352f3b62, 26eb4f7fe6, 7e1f3aa545, 14532e3a56, a449f7c78c, 8854ef8cba, 526943f29e,
df51111c33, dd8900a1c6, 5404905d28, bed51fa790, 9c2cdd4fb9, 69b9b35a08, c2c97d9f78, 48d3c8e62e, a655524221, 26738370c7,
6b204c97ed, 0c4574b3b7, d9d9993179, 7b494c8333, bc9454e0f9, 36a0a14a23, c137666230, bd3b70293f, 355fde3bca, 5a7ca35c6b,
077a947972, 8ac3a94358, dfbebdea9b, b795a70102, 026b6a39a9, 3cabcabcbd, 55781e7f10, f1e831a7b8, 8903ba5d89, eabd321281,
45c6ae446f, 952e641213, c44b5b1b3a, fc8625c5c5, 36a005754a, 479aafc331, f81e3d8667, b9c7d338b3, 4f93106755, 02bccd0097,
1ba01746a0, 83a7d888bc, eba82360fa, 3fa23860c0, a80afc8f1b, 97339e14cf, 443263803c, 9906e7d646, e8f040bfbd, a4b617e482,
c06cd38951, 0a55753faf, 6b4d9f97c1, bf4aa91aab, 500c401b75, 81a2cbb4eb, 0510cf4491, 68728409aa, c00ffcee59, 9aec1ddb17,
d98890f32e, f659f8e309, 54db239175, 601c37f862, 501e746c52, d04146d1b1, ea08931ab3, ddd1b5c0ff, e08d8190ae, fbf7049ac5,
2a1a8c0e64, 833aaf8e10, 566346dd87, d0a33d2ee7, fab505be1b, 2f386913ac, 1c11a5f048, 903a1dc3e5, 6f9a8a089c, 8bee499328,
719faab4b8, 9f477aa3c9, 61ac36c576, 366c148c10, a49f62eecb, fab9ff1294, 1c91b4427b, 244662b1a4, 4550447409, 366740885a,
918889a2bd, 9c8c30b0e8, d7ea750823, b6096efadd, b927c9dc01, 523a58d3d6, 0012fe36ca, cb17b36981, f9fdf1c31b, 1cf0e7be24,
8a9f2aedbb, e88a0d5248, 2ef11faba7, c9eee2944b, 6f962362f2, ba5de0cd00, 3081b9402b, 1597c3aa89, 7d26df01b8, c8296ccb2d,
8d60327d60, 04564dc134, 6161b858c4, 1921a07b89, b65ade51be, dfbb2e2b35, 1805db2b07, 76fbec0c17, 4241bff32a, 5dd60eda36,
8acdd0b47c, 125085fbaf, 7eedcb9c14, e6e747bcd8, 348a08f1c4, b1ac2bf515, 751b205618
.github/workflows/ci.yml (vendored; 4 lines changed)

@@ -425,7 +425,7 @@ jobs:
       python setup.py sdist bdist_wheel

     - name: Publish to PyPI (Test)
-      uses: pypa/gh-action-pypi-publish@v1.8.4
+      uses: pypa/gh-action-pypi-publish@v1.8.5
      if: (github.event_name == 'release')
      with:
        user: __token__
@@ -433,7 +433,7 @@ jobs:
        repository_url: https://test.pypi.org/legacy/

     - name: Publish to PyPI
-      uses: pypa/gh-action-pypi-publish@v1.8.4
+      uses: pypa/gh-action-pypi-publish@v1.8.5
      if: (github.event_name == 'release')
      with:
        user: __token__
@@ -17,8 +17,8 @@ repos:
       - types-filelock==3.2.7
       - types-requests==2.28.11.17
       - types-tabulate==0.9.0.2
-      - types-python-dateutil==2.8.19.11
-      - SQLAlchemy==2.0.8
+      - types-python-dateutil==2.8.19.12
+      - SQLAlchemy==2.0.9
       # stages: [push]

   - repo: https://github.com/pycqa/isort
@@ -12,6 +12,7 @@ TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
 TAG_PLOT=${TAG}_plot
 TAG_FREQAI=${TAG}_freqai
 TAG_FREQAI_RL=${TAG_FREQAI}rl
+TAG_FREQAI_TORCH=${TAG_FREQAI}torch
 TAG_PI="${TAG}_pi"

 TAG_ARM=${TAG}_arm
@@ -84,6 +85,10 @@ docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI}
 docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM}
 docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_RL}

+# Create special Torch tag - which is identical to the RL tag.
+docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_TORCH} ${CACHE_IMAGE}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM}
+docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_TORCH}
+
 # copy images to ghcr.io

 alias crane="docker run --rm -i -v $(pwd)/.crane:/home/nonroot/.docker/ gcr.io/go-containerregistry/crane"
@@ -93,6 +98,7 @@ chmod a+rwx .crane
 echo "${GHCR_TOKEN}" | crane auth login ghcr.io -u "${GHCR_USERNAME}" --password-stdin

 crane copy ${IMAGE_NAME}:${TAG_FREQAI_RL} ${GHCR_IMAGE_NAME}:${TAG_FREQAI_RL}
+crane copy ${IMAGE_NAME}:${TAG_FREQAI_RL} ${GHCR_IMAGE_NAME}:${TAG_FREQAI_TORCH}
 crane copy ${IMAGE_NAME}:${TAG_FREQAI} ${GHCR_IMAGE_NAME}:${TAG_FREQAI}
 crane copy ${IMAGE_NAME}:${TAG_PLOT} ${GHCR_IMAGE_NAME}:${TAG_PLOT}
 crane copy ${IMAGE_NAME}:${TAG} ${GHCR_IMAGE_NAME}:${TAG}
docs/assets/freqai_pytorch-diagram.png (new binary file, 18 KiB; not shown)
@@ -274,19 +274,20 @@ A backtesting result will look like that:
 | XRP/BTC | 35 | 0.66 | 22.96 | 0.00114897 | 11.48 | 3:49:00 | 12 0 23 34.3 |
 | ZEC/BTC | 22 | -0.46 | -10.18 | -0.00050971 | -5.09 | 2:22:00 | 7 0 15 31.8 |
 | TOTAL | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 0 243 43.4 |
-========================================================= EXIT REASON STATS ==========================================================
-| Exit Reason | Exits | Wins | Draws | Losses |
-|:-------------------|--------:|------:|-------:|--------:|
-| trailing_stop_loss | 205 | 150 | 0 | 55 |
-| stop_loss | 166 | 0 | 0 | 166 |
-| exit_signal | 56 | 36 | 0 | 20 |
-| force_exit | 2 | 0 | 0 | 2 |
 ====================================================== LEFT OPEN TRADES REPORT ======================================================
 | Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Win Draw Loss Win% |
 |:---------|---------:|---------------:|---------------:|-----------------:|---------------:|:---------------|--------------------:|
 | ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 0 0 100 |
 | LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 0 0 100 |
 | TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 0 0 100 |
+==================== EXIT REASON STATS ====================
+| Exit Reason | Exits | Wins | Draws | Losses |
+|:-------------------|--------:|------:|-------:|--------:|
+| trailing_stop_loss | 205 | 150 | 0 | 55 |
+| stop_loss | 166 | 0 | 0 | 166 |
+| exit_signal | 56 | 36 | 0 | 20 |
+| force_exit | 2 | 0 | 0 | 2 |
+
 ================== SUMMARY METRICS ==================
 | Metric | Value |
 |-----------------------------+---------------------|
@@ -236,3 +236,161 @@ If you want to predict multiple targets you must specify all labels in the same
 df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
 df['&s-up_or_down'] = np.where( df["close"].shift(-100) == df["close"], 'same', df['&s-up_or_down'])
 ```

The rest of this hunk is newly added documentation:

## PyTorch Module

### Quick start

The easiest way to quickly run a PyTorch model is with the following command (for a regression task):

```bash
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel PyTorchMLPRegressor --strategy-path freqtrade/templates
```

!!! note "Installation/docker"
    The PyTorch module requires large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl or PyTorch (~700mb additional space required) [y/N]?".
    Users who prefer docker should ensure they use the docker image appended with `_freqaitorch`.
### Structure

#### Model

You can construct your own Neural Network architecture in PyTorch by simply defining your `nn.Module` class inside your custom [`IFreqaiModel` file](#using-different-prediction-models) and then using that class in your `def train()` function. Here is an example of a logistic regression model implementation using PyTorch (should be used with the nn.BCELoss criterion) for classification tasks.

```python
class LogisticRegression(nn.Module):
    def __init__(self, input_size: int):
        super().__init__()
        # Define your layers
        self.linear = nn.Linear(input_size, 1)
        self.activation = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Define the forward pass
        out = self.linear(x)
        out = self.activation(out)
        return out


class MyCoolPyTorchClassifier(BasePyTorchClassifier):
    """
    This is a custom IFreqaiModel showing how a user might set up their own
    custom Neural Network architecture for their training.
    """

    @property
    def data_convertor(self) -> PyTorchDataConvertor:
        return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        config = self.freqai_info.get("model_training_parameters", {})
        self.learning_rate: float = config.get("learning_rate", 3e-4)
        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})

    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        User sets up the training and test data to fit their desired model here
        :param data_dictionary: the dictionary holding all data for train, test,
            labels, weights
        :param dk: The datakitchen object for the current coin/model
        """

        class_names = self.get_class_names()
        self.convert_label_column_to_int(data_dictionary, dk, class_names)
        n_features = data_dictionary["train_features"].shape[-1]
        model = LogisticRegression(input_size=n_features)
        model.to(self.device)
        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
        criterion = torch.nn.CrossEntropyLoss()
        init_model = self.get_init_model(dk.pair)
        trainer = PyTorchModelTrainer(
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            model_meta_data={"class_names": class_names},
            device=self.device,
            init_model=init_model,
            data_convertor=self.data_convertor,
            **self.trainer_kwargs,
        )
        trainer.fit(data_dictionary, self.splits)
        return trainer
```
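Since the example above pairs a sigmoid-activated model with `torch.nn.CrossEntropyLoss` in `fit()`, a short standalone contrast of the two criteria mentioned here may help (made-up values, not freqtrade code): `nn.BCELoss` consumes probabilities, while `nn.CrossEntropyLoss` consumes raw logits.

```python
import torch
import torch.nn as nn

# BCELoss expects probabilities (e.g. sigmoid output) and float targets in [0, 1].
probs = torch.tensor([[0.8], [0.3]])            # hypothetical sigmoid outputs
targets = torch.tensor([[1.0], [0.0]])
loss_bce = nn.BCELoss()(probs, targets)

# CrossEntropyLoss expects raw logits (one column per class) and integer labels;
# it applies log-softmax internally, so no sigmoid/softmax beforehand.
logits = torch.tensor([[0.2, 1.5], [2.0, -1.0]])
labels = torch.tensor([1, 0])
loss_ce = nn.CrossEntropyLoss()(logits, labels)

print(loss_bce.item(), loss_ce.item())
```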
#### Trainer

The `PyTorchModelTrainer` performs the idiomatic PyTorch train loop:
Define our model, loss function, and optimizer, and then move them to the appropriate device (GPU or CPU). Inside the loop, we iterate through the batches in the dataloader, move the data to the device, compute the prediction and loss, backpropagate, and update the model parameters using the optimizer (a standalone sketch of this loop is shown below).

In addition, the trainer is responsible for the following:
- saving and loading the model
- converting the data from `pandas.DataFrame` to `torch.Tensor`.
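For readers who want to see the pattern outside freqtrade, here is a minimal self-contained sketch of such a loop (toy data and model; this is not the actual `PyTorchModelTrainer` source):

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

device = "cuda" if torch.cuda.is_available() else "cpu"

# Toy regression data: 256 samples, 8 features.
x = torch.randn(256, 8)
y = x.sum(dim=1, keepdim=True)
loader = DataLoader(TensorDataset(x, y), batch_size=64, shuffle=True)

model = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Linear(32, 1)).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)

for epoch in range(10):
    for xb, yb in loader:
        xb, yb = xb.to(device), yb.to(device)  # move the batch to the device
        optimizer.zero_grad()
        loss = criterion(model(xb), yb)        # forward pass + loss
        loss.backward()                        # backpropagate
        optimizer.step()                       # update the parameters
```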
#### Integration with Freqai module

Like all freqai models, PyTorch models inherit `IFreqaiModel`. `IFreqaiModel` declares three abstract methods: `train`, `fit`, and `predict`. We implement these methods in three levels of hierarchy.
From top to bottom:

1. `BasePyTorchModel` - Implements the `train` method. All `BasePyTorch*` classes inherit it. Responsible for general data preparation (e.g., data normalization) and for calling the `fit` method. Sets the `device` attribute used by child classes and the `model_type` attribute used by the parent class.
2. `BasePyTorch*` - Implements the `predict` method. Here, the `*` represents a group of algorithms, such as classifiers or regressors. Responsible for data preprocessing, predicting, and postprocessing if needed.
3. `PyTorch*Classifier` / `PyTorch*Regressor` - Implements the `fit` method. Responsible for the main train flow, where we initialize the trainer and model objects.

![image](assets/freqai_pytorch-diagram.png)
#### Full example

Building a PyTorch regressor using an MLP (multilayer perceptron) model, MSELoss criterion, and AdamW optimizer.

```python
class PyTorchMLPRegressor(BasePyTorchRegressor):
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        config = self.freqai_info.get("model_training_parameters", {})
        self.learning_rate: float = config.get("learning_rate", 3e-4)
        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})

    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        n_features = data_dictionary["train_features"].shape[-1]
        model = PyTorchMLPModel(
            input_dim=n_features,
            output_dim=1,
            **self.model_kwargs
        )
        model.to(self.device)
        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
        criterion = torch.nn.MSELoss()
        init_model = self.get_init_model(dk.pair)
        trainer = PyTorchModelTrainer(
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            device=self.device,
            init_model=init_model,
            target_tensor_type=torch.float,
            **self.trainer_kwargs,
        )
        trainer.fit(data_dictionary)
        return trainer
```

Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. The `fit` method specifies the training building blocks: model, optimizer, criterion, and trainer. We inherit both `BasePyTorchRegressor` and `BasePyTorchModel`, where the former implements the `predict` method that is suitable for our regression task, and the latter implements the `train` method.
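`PyTorchMLPModel` itself is not shown in this diff; as a rough idea of what such an `nn.Module` might look like, here is a hypothetical stand-in (layer sizes and names are illustrative, not the actual freqtrade implementation):

```python
import torch
import torch.nn as nn

class SimpleMLP(nn.Module):
    """Hypothetical stand-in for an MLP such as PyTorchMLPModel."""

    def __init__(self, input_dim: int, output_dim: int, hidden_dim: int = 256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)
```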
??? Note "Setting Class Names for Classifiers"
    When using classifiers, the user must declare the class names (or targets) by overriding the `IFreqaiModel.class_names` attribute. This is achieved by setting `self.freqai.class_names` in the FreqAI strategy inside the `set_freqai_targets` method.

    For example, if you are using a binary classifier to predict price movements as up or down, you can set the class names as follows:

    ```python
    def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
        self.freqai.class_names = ["down", "up"]
        dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
                                              dataframe["close"], 'up', 'down')

        return dataframe
    ```

    To see a full example, you can refer to the [classifier test strategy class](https://github.com/freqtrade/freqtrade/blob/develop/tests/strategy/strats/freqai_test_classifier.py).
@@ -86,6 +86,27 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br> Default: `False`.
 | `drop_ohlc_from_features` | Do not include the normalized ohlc data in the feature set passed to the agent during training (ohlc will still be used for driving the environment in all cases). <br> **Datatype:** Boolean. <br> **Default:** `False`

The new parameter tables added by this hunk (a config sketch follows below):

### PyTorch parameters

#### general

| Parameter | Description |
|------------|-------------|
| | **Model training parameters within the `freqai.model_training_parameters` sub dictionary**
| `learning_rate` | Learning rate to be passed to the optimizer. <br> **Datatype:** float. <br> Default: `3e-4`.
| `model_kwargs` | Parameters to be passed to the model class. <br> **Datatype:** dict. <br> Default: `{}`.
| `trainer_kwargs` | Parameters to be passed to the trainer class. <br> **Datatype:** dict. <br> Default: `{}`.

#### trainer_kwargs

| Parameter | Description |
|------------|-------------|
| | **Model training parameters within the `freqai.model_training_parameters.trainer_kwargs` sub dictionary**
| `max_iters` | The number of training iterations to run. An iteration here is one call to `self.optimizer.step()`; used to calculate `n_epochs`. <br> **Datatype:** int. <br> Default: `100`.
| `batch_size` | The size of the batches to use during training. <br> **Datatype:** int. <br> Default: `64`.
| `max_n_eval_batches` | The maximum number of batches to use for evaluation. <br> **Datatype:** int, optional. <br> Default: `None`.

### Additional parameters

| Parameter | Description |
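Putting the two tables together, the keys nest roughly like this inside the `freqai` section of the configuration. This is a sketch using only the documented defaults, written as a Python dict for brevity rather than as the actual `config.json`:

```python
freqai_config = {
    "model_training_parameters": {
        "learning_rate": 3e-4,    # passed to the optimizer
        "model_kwargs": {},       # forwarded to the model class
        "trainer_kwargs": {       # forwarded to the trainer class
            "max_iters": 100,
            "batch_size": 64,
            "max_n_eval_batches": None,
        },
    },
}
```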
@@ -1,6 +1,6 @@
 markdown==3.3.7
 mkdocs==1.4.2
-mkdocs-material==9.1.5
+mkdocs-material==9.1.6
 mdx_truly_sane_lists==1.3
-pymdown-extensions==9.10
+pymdown-extensions==9.11
 jinja2==3.1.2
@@ -9,9 +9,6 @@ This same command can also be used to update freqUI, should there be a new release.

 Once the bot is started in trade / dry-run mode (with `freqtrade trade`) - the UI will be available under the configured port below (usually `http://127.0.0.1:8080`).

-!!! info "Alpha release"
-    FreqUI is still considered an alpha release - if you encounter bugs or inconsistencies please open a [FreqUI issue](https://github.com/freqtrade/frequi/issues/new/choose).
-
 !!! Note "developers"
     Developers should not use this method, but instead use the method described in the [freqUI repository](https://github.com/freqtrade/frequi) to get the source-code of freqUI.
@@ -23,10 +23,22 @@ These modes can be configured with these values:
     'stoploss_on_exchange_limit_ratio': 0.99
 ```

 !!! Note
-    Stoploss on exchange is only supported for Binance (stop-loss-limit), Huobi (stop-limit), Kraken (stop-loss-market, stop-loss-limit), Gate (stop-limit), and Kucoin (stop-limit and stop-market) as of now.
-    <ins>Do not set too low/tight a stoploss value if using stop loss on exchange!</ins>
-    If set too low/tight then you have greater risk of missing fill on the order and stoploss will not work.
+    Stoploss on exchange is only supported for the following exchanges, and not all exchanges support both stop-limit and stop-market.
+    The Order-type will be ignored if only one mode is available.
+
+    | Exchange | stop-loss type |
+    |----------|-------------|
+    | Binance  | limit |
+    | Binance Futures | market, limit |
+    | Huobi    | limit |
+    | kraken   | market, limit |
+    | Gate     | limit |
+    | Okx      | limit |
+    | Kucoin   | stop-limit, stop-market |
+
+!!! Note "Tight stoploss"
+    <ins>Do not set too low/tight a stoploss value when using stop loss on exchange!</ins>
+    If set too low/tight you will have greater risk of missing fill on the order and stoploss will not work.

 ### stoploss_on_exchange and stoploss_on_exchange_limit_ratio
@@ -116,7 +116,7 @@ class TimeRange:
         :param text: value from --timerange
         :return: Start and End range period
         """
-        if text is None:
+        if not text:
             return TimeRange(None, None, 0, 0)
         syntax = [(r'^-(\d{8})$', (None, 'date')),
                   (r'^(\d{8})-$', ('date', None)),
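The switch from `is None` to a truthiness check also treats an empty `--timerange` string as "no range", not just a missing one. A standalone illustration of the difference (not freqtrade code):

```python
def wants_default(text):
    # `not text` is True for both None and "" (and any other falsy value),
    # whereas `text is None` is True only for None.
    return not text

assert wants_default(None) is True
assert wants_default("") is True           # the case `is None` would miss
assert wants_default("20200101-") is False
```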
@@ -64,6 +64,7 @@ USERPATH_FREQAIMODELS = 'freqaimodels'
 TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']
 WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']
 FULL_DATAFRAME_THRESHOLD = 100
+CUSTOM_TAG_MAX_LENGTH = 255

 ENV_VAR_PREFIX = 'FREQTRADE__'
@@ -246,14 +246,8 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
     """
     Compatibility support for older backtest data.
     """
-    df['open_date'] = pd.to_datetime(df['open_date'],
-                                     utc=True,
-                                     infer_datetime_format=True
-                                     )
-    df['close_date'] = pd.to_datetime(df['close_date'],
-                                      utc=True,
-                                      infer_datetime_format=True
-                                      )
+    df['open_date'] = pd.to_datetime(df['open_date'], utc=True)
+    df['close_date'] = pd.to_datetime(df['close_date'], utc=True)
     # Compatibility support for pre short Columns
     if 'is_short' not in df.columns:
         df['is_short'] = False
@@ -34,7 +34,7 @@ def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *,
     cols = DEFAULT_DATAFRAME_COLUMNS
     df = DataFrame(ohlcv, columns=cols)

-    df['date'] = to_datetime(df['date'], unit='ms', utc=True, infer_datetime_format=True)
+    df['date'] = to_datetime(df['date'], unit='ms', utc=True)

     # Some exchanges return int values for Volume and even for OHLC.
     # Convert them since TA-LIB indicators used in the strategy assume floats
@@ -63,10 +63,7 @@ class FeatherDataHandler(IDataHandler):
         pairdata.columns = self._columns
         pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
                                           'low': 'float', 'close': 'float', 'volume': 'float'})
-        pairdata['date'] = to_datetime(pairdata['date'],
-                                       unit='ms',
-                                       utc=True,
-                                       infer_datetime_format=True)
+        pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
         return pairdata

     def ohlcv_append(
@@ -75,10 +75,7 @@ class JsonDataHandler(IDataHandler):
             return DataFrame(columns=self._columns)
         pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
                                           'low': 'float', 'close': 'float', 'volume': 'float'})
-        pairdata['date'] = to_datetime(pairdata['date'],
-                                       unit='ms',
-                                       utc=True,
-                                       infer_datetime_format=True)
+        pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
         return pairdata

     def ohlcv_append(
@@ -62,10 +62,7 @@ class ParquetDataHandler(IDataHandler):
         pairdata.columns = self._columns
         pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
                                           'low': 'float', 'close': 'float', 'volume': 'float'})
-        pairdata['date'] = to_datetime(pairdata['date'],
-                                       unit='ms',
-                                       utc=True,
-                                       infer_datetime_format=True)
+        pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
         return pairdata

     def ohlcv_append(
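The `to_datetime` changes above all drop the same keyword across the data handlers. The likely motivation (an assumption on my part, consistent with the pandas 2.0 release notes) is that `infer_datetime_format` is deprecated in pandas 2.0, where consistent format inference became the default behavior; with `unit='ms'` the flag had no effect anyway:

```python
import pandas as pd

ms = pd.Series([1680307200000])  # milliseconds since the Unix epoch
# Since pandas 2.0, passing infer_datetime_format=True emits a FutureWarning;
# omitting it yields the same result.
dates = pd.to_datetime(ms, unit="ms", utc=True)
print(dates.iloc[0])  # 2023-04-01 00:00:00+00:00
```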
freqtrade/freqai/base_models/BasePyTorchClassifier.py (new file, 147 lines)
@@ -0,0 +1,147 @@

import logging
from typing import Dict, List, Tuple

import numpy as np
import numpy.typing as npt
import pandas as pd
import torch
from pandas import DataFrame
from torch.nn import functional as F

from freqtrade.exceptions import OperationalException
from freqtrade.freqai.base_models.BasePyTorchModel import BasePyTorchModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen


logger = logging.getLogger(__name__)


class BasePyTorchClassifier(BasePyTorchModel):
    """
    A PyTorch implementation of a classifier.
    User must implement fit method

    Important!

    - User must declare the target class names in the strategy,
      under IStrategy.set_freqai_targets method.

    for example, in your strategy:
    ```
    def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
        self.freqai.class_names = ["down", "up"]
        dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
                                              dataframe["close"], 'up', 'down')

        return dataframe
    ```
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.class_name_to_index = None
        self.index_to_class_name = None

    def predict(
            self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
        """
        Filter the prediction features data and predict with it.
        :param unfiltered_df: Full dataframe for the current backtest period.
        :return:
        :pred_df: dataframe containing the predictions
        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
        data (NaNs) or felt uncertain about data (PCA and DI index)
        :raises ValueError: if 'class_names' doesn't exist in model meta_data.
        """

        class_names = self.model.model_meta_data.get("class_names", None)
        if not class_names:
            raise ValueError(
                "Missing class names. "
                "self.model.model_meta_data['class_names'] is None."
            )

        if not self.class_name_to_index:
            self.init_class_names_to_index_mapping(class_names)

        dk.find_features(unfiltered_df)
        filtered_df, _ = dk.filter_features(
            unfiltered_df, dk.training_features_list, training_filter=False
        )
        filtered_df = dk.normalize_data_from_metadata(filtered_df)
        dk.data_dictionary["prediction_features"] = filtered_df
        self.data_cleaning_predict(dk)
        x = self.data_convertor.convert_x(
            dk.data_dictionary["prediction_features"],
            device=self.device
        )
        logits = self.model.model(x)
        probs = F.softmax(logits, dim=-1)
        predicted_classes = torch.argmax(probs, dim=-1)
        predicted_classes_str = self.decode_class_names(predicted_classes)
        pred_df_prob = DataFrame(probs.detach().numpy(), columns=class_names)
        pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]])
        pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
        return (pred_df, dk.do_predict)

    def encode_class_names(
            self,
            data_dictionary: Dict[str, pd.DataFrame],
            dk: FreqaiDataKitchen,
            class_names: List[str],
    ):
        """
        encode class name, str -> int
        assuming first column of *_labels data frame to be the target column
        containing the class names
        """

        target_column_name = dk.label_list[0]
        for split in self.splits:
            label_df = data_dictionary[f"{split}_labels"]
            self.assert_valid_class_names(label_df[target_column_name], class_names)
            label_df[target_column_name] = list(
                map(lambda x: self.class_name_to_index[x], label_df[target_column_name])
            )

    @staticmethod
    def assert_valid_class_names(
            target_column: pd.Series,
            class_names: List[str]
    ):
        non_defined_labels = set(target_column) - set(class_names)
        if len(non_defined_labels) != 0:
            raise OperationalException(
                f"Found non defined labels: {non_defined_labels}, ",
                f"expecting labels: {class_names}"
            )

    def decode_class_names(self, class_ints: torch.Tensor) -> List[str]:
        """
        decode class name, int -> str
        """

        return list(map(lambda x: self.index_to_class_name[x.item()], class_ints))

    def init_class_names_to_index_mapping(self, class_names):
        self.class_name_to_index = {s: i for i, s in enumerate(class_names)}
        self.index_to_class_name = {i: s for i, s in enumerate(class_names)}
        logger.info(f"encoded class name to index: {self.class_name_to_index}")

    def convert_label_column_to_int(
            self,
            data_dictionary: Dict[str, pd.DataFrame],
            dk: FreqaiDataKitchen,
            class_names: List[str]
    ):
        self.init_class_names_to_index_mapping(class_names)
        self.encode_class_names(data_dictionary, dk, class_names)

    def get_class_names(self) -> List[str]:
        if not self.class_names:
            raise ValueError(
                "self.class_names is empty, "
                "set self.freqai.class_names = ['class a', 'class b', 'class c'] "
                "inside IStrategy.set_freqai_targets method."
            )

        return self.class_names
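The core of `predict()` above is the logits, probabilities, class-name pipeline. A standalone illustration with made-up logits (mirroring, not reproducing, the class's behavior):

```python
import torch
from torch.nn import functional as F

class_names = ["down", "up"]
index_to_class_name = dict(enumerate(class_names))

logits = torch.tensor([[0.2, 1.5], [2.0, -1.0]])  # hypothetical model output
probs = F.softmax(logits, dim=-1)                  # per-class probabilities
predicted = torch.argmax(probs, dim=-1)            # class indices
labels = [index_to_class_name[i.item()] for i in predicted]
print(labels)  # ['up', 'down']
```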
freqtrade/freqai/base_models/BasePyTorchModel.py (new file, 83 lines)
@@ -0,0 +1,83 @@

import logging
from abc import ABC, abstractmethod
from time import time
from typing import Any

import torch
from pandas import DataFrame

from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor


logger = logging.getLogger(__name__)


class BasePyTorchModel(IFreqaiModel, ABC):
    """
    Base class for PyTorch type models.
    User *must* inherit from this class and set fit() and predict() and
    data_convertor property.
    """

    def __init__(self, **kwargs):
        super().__init__(config=kwargs["config"])
        self.dd.model_type = "pytorch"
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        test_size = self.freqai_info.get('data_split_parameters', {}).get('test_size')
        self.splits = ["train", "test"] if test_size != 0 else ["train"]

    def train(
            self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
    ) -> Any:
        """
        Filter the training data and train a model to it. Train makes heavy use of the datakitchen
        for storing, saving, loading, and analyzing the data.
        :param unfiltered_df: Full dataframe for the current training period
        :return:
        :model: Trained model which can be used to inference (self.predict)
        """

        logger.info(f"-------------------- Starting training {pair} --------------------")

        start_time = time()

        features_filtered, labels_filtered = dk.filter_features(
            unfiltered_df,
            dk.training_features_list,
            dk.label_list,
            training_filter=True,
        )

        # split data into train/test data.
        data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
        if not self.freqai_info.get("fit_live_predictions", 0) or not self.live:
            dk.fit_labels()
        # normalize all data based on train_dataset only
        data_dictionary = dk.normalize_data(data_dictionary)

        # optional additional data cleaning/analysis
        self.data_cleaning_train(dk)

        logger.info(
            f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
        )
        logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")

        model = self.fit(data_dictionary, dk)
        end_time = time()

        logger.info(f"-------------------- Done training {pair} "
                    f"({end_time - start_time:.2f} secs) --------------------")

        return model

    @property
    @abstractmethod
    def data_convertor(self) -> PyTorchDataConvertor:
        """
        a class responsible for converting `*_features` & `*_labels` pandas dataframes
        to pytorch tensors.
        """
        raise NotImplementedError("Abstract property")
freqtrade/freqai/base_models/BasePyTorchRegressor.py (new file, 49 lines)
@@ -0,0 +1,49 @@

import logging
from typing import Tuple

import numpy as np
import numpy.typing as npt
from pandas import DataFrame

from freqtrade.freqai.base_models.BasePyTorchModel import BasePyTorchModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen


logger = logging.getLogger(__name__)


class BasePyTorchRegressor(BasePyTorchModel):
    """
    A PyTorch implementation of a regressor.
    User must implement fit method
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def predict(
            self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
        """
        Filter the prediction features data and predict with it.
        :param unfiltered_df: Full dataframe for the current backtest period.
        :return:
        :pred_df: dataframe containing the predictions
        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
        data (NaNs) or felt uncertain about data (PCA and DI index)
        """

        dk.find_features(unfiltered_df)
        filtered_df, _ = dk.filter_features(
            unfiltered_df, dk.training_features_list, training_filter=False
        )
        filtered_df = dk.normalize_data_from_metadata(filtered_df)
        dk.data_dictionary["prediction_features"] = filtered_df

        self.data_cleaning_predict(dk)
        x = self.data_convertor.convert_x(
            dk.data_dictionary["prediction_features"],
            device=self.device
        )
        y = self.model.model(x)
        pred_df = DataFrame(y.detach().numpy(), columns=[dk.label_list[0]])
        return (pred_df, dk.do_predict)
@@ -446,7 +446,7 @@ class FreqaiDataDrawer:
             dump(model, save_path / f"{dk.model_filename}_model.joblib")
         elif self.model_type == 'keras':
             model.save(save_path / f"{dk.model_filename}_model.h5")
-        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
+        elif self.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
             model.save(save_path / f"{dk.model_filename}_model.zip")

         if dk.svm_model is not None:
@@ -496,7 +496,7 @@ class FreqaiDataDrawer:
         dk.training_features_list = dk.data["training_features_list"]
         dk.label_list = dk.data["label_list"]

-    def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:
+    def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:  # noqa: C901
         """
         loads all data required to make a prediction on a sub-train time range
         :returns:
@@ -537,6 +537,11 @@ class FreqaiDataDrawer:
                 self.model_type, self.freqai_info['rl_config']['model_type'])
             MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type'])
             model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
+        elif self.model_type == 'pytorch':
+            import torch
+            zip = torch.load(dk.data_path / f"{dk.model_filename}_model.zip")
+            model = zip["pytrainer"]
+            model = model.load_from_checkpoint(zip)

         if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
             dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
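The new `'pytorch'` branch reads a checkpoint dictionary that was written with `torch.save`, which uses a zip-based file format. A minimal standalone sketch of that save/load round trip (file name and dictionary keys are hypothetical, not freqtrade's):

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)

# Bundle whatever is needed to restore training state into one dict.
checkpoint = {
    "model_state_dict": model.state_dict(),
    "optimizer_state_dict": optimizer.state_dict(),
}
torch.save(checkpoint, "example_model.zip")

# torch.load restores the dict as-is; state dicts can then be re-applied.
restored = torch.load("example_model.zip")
model.load_state_dict(restored["model_state_dict"])
```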
@@ -1291,7 +1291,7 @@ class FreqaiDataKitchen:

         return dataframe

-    def use_strategy_to_populate_indicators(
+    def use_strategy_to_populate_indicators(  # noqa: C901
         self,
         strategy: IStrategy,
         corr_dataframes: dict = {},
@@ -1362,12 +1362,12 @@ class FreqaiDataKitchen:
             dataframe = self.populate_features(dataframe.copy(), corr_pair, strategy,
                                                corr_dataframes, base_dataframes, True)

-        dataframe = strategy.set_freqai_targets(dataframe.copy(), metadata=metadata)
-
-        self.get_unique_classes_from_labels(dataframe)
-
-        dataframe = self.remove_special_chars_from_feature_names(dataframe)
+        if self.live:
+            dataframe = strategy.set_freqai_targets(dataframe.copy(), metadata=metadata)
+            dataframe = self.remove_special_chars_from_feature_names(dataframe)
+
+            self.get_unique_classes_from_labels(dataframe)

         if self.config.get('reduce_df_footprint', False):
             dataframe = reduce_dataframe_footprint(dataframe)
@@ -83,6 +83,7 @@ class IFreqaiModel(ABC):
         self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
         if self.ft_params.get("inlier_metric_window", 0):
             self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
+        self.class_names: List[str] = []  # used in classification subclasses
         self.pair_it = 0
         self.pair_it_train = 0
         self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
@@ -306,7 +307,7 @@ class IFreqaiModel(ABC):
             if check_features:
                 self.dd.load_metadata(dk)
                 dataframe_dummy_features = self.dk.use_strategy_to_populate_indicators(
-                    strategy, prediction_dataframe=dataframe.tail(1), pair=metadata["pair"]
+                    strategy, prediction_dataframe=dataframe.tail(1), pair=pair
                 )
                 dk.find_features(dataframe_dummy_features)
                 self.check_if_feature_list_matches_strategy(dk)
@@ -316,7 +317,7 @@ class IFreqaiModel(ABC):
             else:
                 if populate_indicators:
                     dataframe = self.dk.use_strategy_to_populate_indicators(
-                        strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
+                        strategy, prediction_dataframe=dataframe, pair=pair
                     )
                     populate_indicators = False
@@ -332,6 +333,10 @@ class IFreqaiModel(ABC):
             dataframe_train = dk.slice_dataframe(tr_train, dataframe_base_train)
             dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe_base_backtest)

+            dataframe_train = dk.remove_special_chars_from_feature_names(dataframe_train)
+            dataframe_backtest = dk.remove_special_chars_from_feature_names(dataframe_backtest)
+            dk.get_unique_classes_from_labels(dataframe_train)
+
             if not self.model_exists(dk):
                 dk.find_features(dataframe_train)
                 dk.find_labels(dataframe_train)
@@ -567,8 +572,9 @@ class IFreqaiModel(ABC):
             file_type = ".joblib"
         elif self.dd.model_type == 'keras':
             file_type = ".h5"
-        elif 'stable_baselines' in self.dd.model_type or 'sb3_contrib' == self.dd.model_type:
+        elif self.dd.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
             file_type = ".zip"

         path_to_modelfile = Path(dk.data_path / f"{dk.model_filename}_model{file_type}")
         file_exists = path_to_modelfile.is_file()
         if file_exists:
@@ -14,16 +14,20 @@ logger = logging.getLogger(__name__)

 class CatboostClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         train_data = Pool(
@@ -15,16 +15,20 @@ logger = logging.getLogger(__name__)

 class CatboostClassifierMultiTarget(BaseClassifierModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         cbc = CatBoostClassifier(
@@ -14,16 +14,20 @@ logger = logging.getLogger(__name__)

 class CatboostRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         train_data = Pool(
@@ -15,16 +15,20 @@ logger = logging.getLogger(__name__)

 class CatboostRegressorMultiTarget(BaseRegressionModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         cbr = CatBoostRegressor(
@@ -12,16 +12,20 @@ logger = logging.getLogger(__name__)

 class LightGBMClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
@@ -13,16 +13,20 @@ logger = logging.getLogger(__name__)


 class LightGBMClassifierMultiTarget(BaseClassifierModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         lgb = LGBMClassifier(**self.model_training_parameters)
@@ -12,18 +12,20 @@ logger = logging.getLogger(__name__)


 class LightGBMRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
-        Most regressors use the same function names and arguments e.g. user
-        can drop in LGBMRegressor in place of CatBoostRegressor and all data
-        management will be properly handled by Freqai.
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        User sets up the training and test data to fit their desired model here
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
@@ -13,16 +13,20 @@ logger = logging.getLogger(__name__)


 class LightGBMRegressorMultiTarget(BaseRegressionModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         lgb = LGBMRegressor(**self.model_training_parameters)
freqtrade/freqai/prediction_models/PyTorchMLPClassifier.py (new file, 89 lines)
@@ -0,0 +1,89 @@
+from typing import Any, Dict
+
+import torch
+
+from freqtrade.freqai.base_models.BasePyTorchClassifier import BasePyTorchClassifier
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
+                                                         PyTorchDataConvertor)
+from freqtrade.freqai.torch.PyTorchMLPModel import PyTorchMLPModel
+from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchModelTrainer
+
+
+class PyTorchMLPClassifier(BasePyTorchClassifier):
+    """
+    This class implements the fit method of IFreqaiModel.
+    in the fit method we initialize the model and trainer objects.
+    the only requirement from the model is to be aligned to PyTorchClassifier
+    predict method that expects the model to predict a tensor of type long.
+
+    parameters are passed via `model_training_parameters` under the freqai
+    section in the config file. e.g:
+    {
+        ...
+        "freqai": {
+            ...
+            "model_training_parameters" : {
+                "learning_rate": 3e-4,
+                "trainer_kwargs": {
+                    "max_iters": 5000,
+                    "batch_size": 64,
+                    "max_n_eval_batches": null,
+                },
+                "model_kwargs": {
+                    "hidden_dim": 512,
+                    "dropout_percent": 0.2,
+                    "n_layer": 1,
+                },
+            }
+        }
+    }
+    """
+
+    @property
+    def data_convertor(self) -> PyTorchDataConvertor:
+        return DefaultPyTorchDataConvertor(
+            target_tensor_type=torch.long,
+            squeeze_target_tensor=True
+        )
+
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        config = self.freqai_info.get("model_training_parameters", {})
+        self.learning_rate: float = config.get("learning_rate", 3e-4)
+        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
+        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
+
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+        """
+        User sets up the training and test data to fit their desired model here
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
+        :raises ValueError: If self.class_names is not defined in the parent class.
+        """
+
+        class_names = self.get_class_names()
+        self.convert_label_column_to_int(data_dictionary, dk, class_names)
+        n_features = data_dictionary["train_features"].shape[-1]
+        model = PyTorchMLPModel(
+            input_dim=n_features,
+            output_dim=len(class_names),
+            **self.model_kwargs
+        )
+        model.to(self.device)
+        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
+        criterion = torch.nn.CrossEntropyLoss()
+        init_model = self.get_init_model(dk.pair)
+        trainer = PyTorchModelTrainer(
+            model=model,
+            optimizer=optimizer,
+            criterion=criterion,
+            model_meta_data={"class_names": class_names},
+            device=self.device,
+            init_model=init_model,
+            data_convertor=self.data_convertor,
+            **self.trainer_kwargs,
+        )
+        trainer.fit(data_dictionary, self.splits)
+        return trainer
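
The docstring above doubles as a configuration reference. Assuming freqtrade's existing `--freqaimodel` flag and a config whose `freqai` section carries the docstring's `model_training_parameters`, selecting the new classifier would look roughly like this (`MyFreqaiStrategy` is a hypothetical strategy name):

```bash
# config.json (fragment of the "freqai" section), mirroring the docstring:
#   "model_training_parameters": {
#       "learning_rate": 3e-4,
#       "trainer_kwargs": {"max_iters": 5000, "batch_size": 64, "max_n_eval_batches": null},
#       "model_kwargs": {"hidden_dim": 512, "dropout_percent": 0.2, "n_layer": 1}
#   }
freqtrade trade --config config.json --strategy MyFreqaiStrategy --freqaimodel PyTorchMLPClassifier
```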
freqtrade/freqai/prediction_models/PyTorchMLPRegressor.py (new file, 83 lines)
@@ -0,0 +1,83 @@
+from typing import Any, Dict
+
+import torch
+
+from freqtrade.freqai.base_models.BasePyTorchRegressor import BasePyTorchRegressor
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
+                                                         PyTorchDataConvertor)
+from freqtrade.freqai.torch.PyTorchMLPModel import PyTorchMLPModel
+from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchModelTrainer
+
+
+class PyTorchMLPRegressor(BasePyTorchRegressor):
+    """
+    This class implements the fit method of IFreqaiModel.
+    in the fit method we initialize the model and trainer objects.
+    the only requirement from the model is to be aligned to PyTorchRegressor
+    predict method that expects the model to predict tensor of type float.
+    the trainer defines the training loop.
+
+    parameters are passed via `model_training_parameters` under the freqai
+    section in the config file. e.g:
+    {
+        ...
+        "freqai": {
+            ...
+            "model_training_parameters" : {
+                "learning_rate": 3e-4,
+                "trainer_kwargs": {
+                    "max_iters": 5000,
+                    "batch_size": 64,
+                    "max_n_eval_batches": null,
+                },
+                "model_kwargs": {
+                    "hidden_dim": 512,
+                    "dropout_percent": 0.2,
+                    "n_layer": 1,
+                },
+            }
+        }
+    }
+    """
+
+    @property
+    def data_convertor(self) -> PyTorchDataConvertor:
+        return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)
+
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        config = self.freqai_info.get("model_training_parameters", {})
+        self.learning_rate: float = config.get("learning_rate", 3e-4)
+        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
+        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
+
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+        """
+        User sets up the training and test data to fit their desired model here
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
+        """
+
+        n_features = data_dictionary["train_features"].shape[-1]
+        model = PyTorchMLPModel(
+            input_dim=n_features,
+            output_dim=1,
+            **self.model_kwargs
+        )
+        model.to(self.device)
+        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
+        criterion = torch.nn.MSELoss()
+        init_model = self.get_init_model(dk.pair)
+        trainer = PyTorchModelTrainer(
+            model=model,
+            optimizer=optimizer,
+            criterion=criterion,
+            device=self.device,
+            init_model=init_model,
+            data_convertor=self.data_convertor,
+            **self.trainer_kwargs,
+        )
+        trainer.fit(data_dictionary, self.splits)
+        return trainer
@@ -18,16 +18,20 @@ logger = logging.getLogger(__name__)


 class XGBoostClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         X = data_dictionary["train_features"].to_numpy()
@@ -18,16 +18,20 @@ logger = logging.getLogger(__name__)


 class XGBoostRFClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         X = data_dictionary["train_features"].to_numpy()
@@ -12,16 +12,20 @@ logger = logging.getLogger(__name__)


 class XGBoostRFRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         X = data_dictionary["train_features"]
@@ -12,16 +12,20 @@ logger = logging.getLogger(__name__)


 class XGBoostRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         X = data_dictionary["train_features"]
@@ -13,16 +13,20 @@ logger = logging.getLogger(__name__)


 class XGBoostRegressorMultiTarget(BaseRegressionModel):
     """
-    User created prediction model. The class needs to override three necessary
-    functions, predict(), train(), fit(). The class inherits ModelHandler which
-    has its own DataHandler where data is held, saved, loaded, and managed.
+    User created prediction model. The class inherits IFreqaiModel, which
+    means it has full access to all Frequency AI functionality. Typically,
+    users would use this to override the common `fit()`, `train()`, or
+    `predict()` methods to add their custom data handling tools or change
+    various aspects of the training that cannot be configured via the
+    top level config.json file.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-        all the training and test data/labels.
+        :param data_dictionary: the dictionary holding all data for train, test,
+        labels, weights
+        :param dk: The datakitchen object for the current coin/model
         """

         xgb = XGBRegressor(**self.model_training_parameters)
freqtrade/freqai/torch/PyTorchDataConvertor.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+from abc import ABC, abstractmethod
+from typing import List, Optional
+
+import pandas as pd
+import torch
+
+
+class PyTorchDataConvertor(ABC):
+    """
+    This class is responsible for converting `*_features` & `*_labels` pandas dataframes
+    to pytorch tensors.
+    """
+
+    @abstractmethod
+    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+        """
+        :param df: "*_features" dataframe.
+        :param device: The device to use for training (e.g. 'cpu', 'cuda').
+        """
+
+    @abstractmethod
+    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+        """
+        :param df: "*_labels" dataframe.
+        :param device: The device to use for training (e.g. 'cpu', 'cuda').
+        """
+
+
+class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
+    """
+    A default conversion that keeps features dataframe shapes.
+    """
+
+    def __init__(
+            self,
+            target_tensor_type: Optional[torch.dtype] = None,
+            squeeze_target_tensor: bool = False
+    ):
+        """
+        :param target_tensor_type: type of target tensor, for classification use
+        torch.long, for regressor use torch.float or torch.double.
+        :param squeeze_target_tensor: controls the target shape, used for loss functions
+        that requires 0D or 1D.
+        """
+        self._target_tensor_type = target_tensor_type
+        self._squeeze_target_tensor = squeeze_target_tensor
+
+    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+        x = torch.from_numpy(df.values).float()
+        if device:
+            x = x.to(device)
+
+        return [x]
+
+    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+        y = torch.from_numpy(df.values)
+
+        if self._target_tensor_type:
+            y = y.to(self._target_tensor_type)
+
+        if self._squeeze_target_tensor:
+            y = y.squeeze()
+
+        if device:
+            y = y.to(device)
+
+        return [y]
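
A quick illustration of what `DefaultPyTorchDataConvertor` does to a labels dataframe. This is a self-contained sketch that mirrors `convert_y` for a classifier-style convertor (torch.long target, squeezed to 1D as CrossEntropyLoss expects) rather than importing freqtrade:

```python
import pandas as pd
import torch

# Labels arrive as a single-column dataframe of integer class ids.
df = pd.DataFrame({"&s-up_or_down": [0, 1, 1, 0]})

y = torch.from_numpy(df.values)   # shape (4, 1)
y = y.to(torch.long)              # classification targets must be long
y = y.squeeze()                   # shape (4,), as CrossEntropyLoss expects
print(y.shape, y.dtype)           # torch.Size([4]) torch.int64
```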
freqtrade/freqai/torch/PyTorchMLPModel.py (new file, 97 lines)
@@ -0,0 +1,97 @@
+import logging
+from typing import List
+
+import torch
+from torch import nn
+
+
+logger = logging.getLogger(__name__)
+
+
+class PyTorchMLPModel(nn.Module):
+    """
+    A multi-layer perceptron (MLP) model implemented using PyTorch.
+
+    This class mainly serves as a simple example for the integration of PyTorch model's
+    to freqai. It is not optimized at all and should not be used for production purposes.
+
+    :param input_dim: The number of input features. This parameter specifies the number
+        of features in the input data that the MLP will use to make predictions.
+    :param output_dim: The number of output classes. This parameter specifies the number
+        of classes that the MLP will predict.
+    :param hidden_dim: The number of hidden units in each layer. This parameter controls
+        the complexity of the MLP and determines how many nonlinear relationships the MLP
+        can represent. Increasing the number of hidden units can increase the capacity of
+        the MLP to model complex patterns, but it also increases the risk of overfitting
+        the training data. Default: 256
+    :param dropout_percent: The dropout rate for regularization. This parameter specifies
+        the probability of dropping out a neuron during training to prevent overfitting.
+        The dropout rate should be tuned carefully to balance between underfitting and
+        overfitting. Default: 0.2
+    :param n_layer: The number of layers in the MLP. This parameter specifies the number
+        of layers in the MLP architecture. Adding more layers to the MLP can increase its
+        capacity to model complex patterns, but it also increases the risk of overfitting
+        the training data. Default: 1
+
+    :returns: The output of the MLP, with shape (batch_size, output_dim)
+    """
+
+    def __init__(self, input_dim: int, output_dim: int, **kwargs):
+        super().__init__()
+        hidden_dim: int = kwargs.get("hidden_dim", 256)
+        dropout_percent: int = kwargs.get("dropout_percent", 0.2)
+        n_layer: int = kwargs.get("n_layer", 1)
+        self.input_layer = nn.Linear(input_dim, hidden_dim)
+        self.blocks = nn.Sequential(*[Block(hidden_dim, dropout_percent) for _ in range(n_layer)])
+        self.output_layer = nn.Linear(hidden_dim, output_dim)
+        self.relu = nn.ReLU()
+        self.dropout = nn.Dropout(p=dropout_percent)
+
+    def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
+        x: torch.Tensor = tensors[0]
+        x = self.relu(self.input_layer(x))
+        x = self.dropout(x)
+        x = self.blocks(x)
+        x = self.output_layer(x)
+        return x
+
+
+class Block(nn.Module):
+    """
+    A building block for a multi-layer perceptron (MLP).
+
+    :param hidden_dim: The number of hidden units in the feedforward network.
+    :param dropout_percent: The dropout rate for regularization.
+
+    :returns: torch.Tensor. with shape (batch_size, hidden_dim)
+    """
+
+    def __init__(self, hidden_dim: int, dropout_percent: int):
+        super().__init__()
+        self.ff = FeedForward(hidden_dim)
+        self.dropout = nn.Dropout(p=dropout_percent)
+        self.ln = nn.LayerNorm(hidden_dim)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.ff(self.ln(x))
+        x = self.dropout(x)
+        return x
+
+
+class FeedForward(nn.Module):
+    """
+    A simple fully-connected feedforward neural network block.
+
+    :param hidden_dim: The number of hidden units in the block.
+    :return: torch.Tensor. with shape (batch_size, hidden_dim)
+    """
+
+    def __init__(self, hidden_dim: int):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.ReLU(),
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.net(x)
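
A shape check for the model above (a sketch assuming this branch of freqtrade is importable; the three classes can equally be copied into a standalone file):

```python
import torch

from freqtrade.freqai.torch.PyTorchMLPModel import PyTorchMLPModel

model = PyTorchMLPModel(input_dim=10, output_dim=3, hidden_dim=32, n_layer=2)
x = torch.randn(8, 10)   # a batch of 8 samples with 10 features each
out = model([x])         # forward() takes a list of tensors and uses the first
print(out.shape)         # torch.Size([8, 3])
```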
freqtrade/freqai/torch/PyTorchModelTrainer.py (new file, 208 lines)
@@ -0,0 +1,208 @@
+import logging
+import math
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import pandas as pd
+import torch
+from torch import nn
+from torch.optim import Optimizer
+from torch.utils.data import DataLoader, TensorDataset
+
+from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor
+from freqtrade.freqai.torch.PyTorchTrainerInterface import PyTorchTrainerInterface
+
+
+logger = logging.getLogger(__name__)
+
+
+class PyTorchModelTrainer(PyTorchTrainerInterface):
+    def __init__(
+            self,
+            model: nn.Module,
+            optimizer: Optimizer,
+            criterion: nn.Module,
+            device: str,
+            init_model: Dict,
+            data_convertor: PyTorchDataConvertor,
+            model_meta_data: Dict[str, Any] = {},
+            **kwargs
+    ):
+        """
+        :param model: The PyTorch model to be trained.
+        :param optimizer: The optimizer to use for training.
+        :param criterion: The loss function to use for training.
+        :param device: The device to use for training (e.g. 'cpu', 'cuda').
+        :param init_model: A dictionary containing the initial model/optimizer
+            state_dict and model_meta_data saved by self.save() method.
+        :param model_meta_data: Additional metadata about the model (optional).
+        :param data_convertor: convertor from pd.DataFrame to torch.tensor.
+        :param max_iters: The number of training iterations to run.
+            iteration here refers to the number of times we call
+            self.optimizer.step(). used to calculate n_epochs.
+        :param batch_size: The size of the batches to use during training.
+        :param max_n_eval_batches: The maximum number batches to use for evaluation.
+        """
+        self.model = model
+        self.optimizer = optimizer
+        self.criterion = criterion
+        self.model_meta_data = model_meta_data
+        self.device = device
+        self.max_iters: int = kwargs.get("max_iters", 100)
+        self.batch_size: int = kwargs.get("batch_size", 64)
+        self.max_n_eval_batches: Optional[int] = kwargs.get("max_n_eval_batches", None)
+        self.data_convertor = data_convertor
+        if init_model:
+            self.load_from_checkpoint(init_model)
+
+    def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]):
+        """
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+            all the training and test data/labels.
+        :param splits: splits to use in training, splits must contain "train",
+            optional "test" could be added by setting freqai.data_split_parameters.test_size > 0
+            in the config file.
+
+        - Calculates the predicted output for the batch using the PyTorch model.
+        - Calculates the loss between the predicted and actual output using a loss function.
+        - Computes the gradients of the loss with respect to the model's parameters using
+            backpropagation.
+        - Updates the model's parameters using an optimizer.
+        """
+        data_loaders_dictionary = self.create_data_loaders_dictionary(data_dictionary, splits)
+        epochs = self.calc_n_epochs(
+            n_obs=len(data_dictionary["train_features"]),
+            batch_size=self.batch_size,
+            n_iters=self.max_iters
+        )
+        for epoch in range(1, epochs + 1):
+            # training
+            losses = []
+            for i, batch_data in enumerate(data_loaders_dictionary["train"]):
+
+                for tensor in batch_data:
+                    tensor.to(self.device)
+
+                xb = batch_data[:-1]
+                yb = batch_data[-1]
+                yb_pred = self.model(xb)
+                loss = self.criterion(yb_pred, yb)
+
+                self.optimizer.zero_grad(set_to_none=True)
+                loss.backward()
+                self.optimizer.step()
+                losses.append(loss.item())
+            train_loss = sum(losses) / len(losses)
+            log_message = f"epoch {epoch}/{epochs}: train loss {train_loss:.4f}"
+
+            # evaluation
+            if "test" in splits:
+                test_loss = self.estimate_loss(
+                    data_loaders_dictionary,
+                    self.max_n_eval_batches,
+                    "test"
+                )
+                log_message += f" ; test loss {test_loss:.4f}"
+
+            logger.info(log_message)
+
+    @torch.no_grad()
+    def estimate_loss(
+            self,
+            data_loader_dictionary: Dict[str, DataLoader],
+            max_n_eval_batches: Optional[int],
+            split: str,
+    ) -> float:
+        self.model.eval()
+        n_batches = 0
+        losses = []
+        for i, batch_data in enumerate(data_loader_dictionary[split]):
+            if max_n_eval_batches and i > max_n_eval_batches:
+                n_batches += 1
+                break
+
+            for tensor in batch_data:
+                tensor.to(self.device)
+
+            xb = batch_data[:-1]
+            yb = batch_data[-1]
+            yb_pred = self.model(xb)
+            loss = self.criterion(yb_pred, yb)
+            losses.append(loss.item())
+
+        self.model.train()
+        return sum(losses) / len(losses)
+
+    def create_data_loaders_dictionary(
+            self,
+            data_dictionary: Dict[str, pd.DataFrame],
+            splits: List[str]
+    ) -> Dict[str, DataLoader]:
+        """
+        Converts the input data to PyTorch tensors using a data loader.
+        """
+        data_loader_dictionary = {}
+        for split in splits:
+            x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"])
+            y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"])
+            dataset = TensorDataset(*x, *y)
+            data_loader = DataLoader(
+                dataset,
+                batch_size=self.batch_size,
+                shuffle=True,
+                drop_last=True,
+                num_workers=0,
+            )
+            data_loader_dictionary[split] = data_loader
+
+        return data_loader_dictionary
+
+    @staticmethod
+    def calc_n_epochs(n_obs: int, batch_size: int, n_iters: int) -> int:
+        """
+        Calculates the number of epochs required to reach the maximum number
+        of iterations specified in the model training parameters.
+
+        the motivation here is that `max_iters` is easier to optimize and keep stable,
+        across different n_obs - the number of data points.
+        """
+
+        n_batches = math.ceil(n_obs // batch_size)
+        epochs = math.ceil(n_iters // n_batches)
+        if epochs <= 10:
+            logger.warning("User set `max_iters` in such a way that the trainer will only perform "
+                           f" {epochs} epochs. Please consider increasing this value accordingly")
+        if epochs <= 1:
+            logger.warning("Epochs set to 1. Please review your `max_iters` value")
+            epochs = 1
+        return epochs
+
+    def save(self, path: Path):
+        """
+        - Saving any nn.Module state_dict
+        - Saving model_meta_data, this dict should contain any additional data that the
+            user needs to store. e.g class_names for classification models.
+        """
+
+        torch.save({
+            "model_state_dict": self.model.state_dict(),
+            "optimizer_state_dict": self.optimizer.state_dict(),
+            "model_meta_data": self.model_meta_data,
+            "pytrainer": self
+        }, path)
+
+    def load(self, path: Path):
+        checkpoint = torch.load(path)
+        return self.load_from_checkpoint(checkpoint)
+
+    def load_from_checkpoint(self, checkpoint: Dict):
+        """
+        when using continual_learning, DataDrawer will load the dictionary
+        (containing state dicts and model_meta_data) by calling torch.load(path).
+        you can access this dict from any class that inherits IFreqaiModel by calling
+        get_init_model method.
+        """
+        self.model.load_state_dict(checkpoint["model_state_dict"])
+        self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
+        self.model_meta_data = checkpoint["model_meta_data"]
+        return self
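
Note that `calc_n_epochs` derives the epoch count from `max_iters` instead of taking epochs directly, so the amount of optimisation work stays roughly constant as the dataset grows. A worked example with the defaults (`max_iters=100`, `batch_size=64`) on 1000 observations:

```python
import math

n_obs, batch_size, n_iters = 1000, 64, 100
n_batches = math.ceil(n_obs // batch_size)   # 1000 // 64 = 15 (`//` already floors before ceil)
epochs = math.ceil(n_iters // n_batches)     # 100 // 15 = 6
print(n_batches, epochs)                     # 15 6 -> triggers the "<= 10 epochs" warning
```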
freqtrade/freqai/torch/PyTorchTrainerInterface.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Dict, List
+
+import pandas as pd
+import torch
+from torch import nn
+
+
+class PyTorchTrainerInterface(ABC):
+
+    @abstractmethod
+    def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]) -> None:
+        """
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+            all the training and test data/labels.
+        :param splits: splits to use in training, splits must contain "train",
+            optional "test" could be added by setting freqai.data_split_parameters.test_size > 0
+            in the config file.
+
+        - Calculates the predicted output for the batch using the PyTorch model.
+        - Calculates the loss between the predicted and actual output using a loss function.
+        - Computes the gradients of the loss with respect to the model's parameters using
+            backpropagation.
+        - Updates the model's parameters using an optimizer.
+        """
+
+    @abstractmethod
+    def save(self, path: Path) -> None:
+        """
+        - Saving any nn.Module state_dict
+        - Saving model_meta_data, this dict should contain any additional data that the
+            user needs to store. e.g class_names for classification models.
+        """
+
+    def load(self, path: Path) -> nn.Module:
+        """
+        :param path: path to zip file.
+        :returns: pytorch model.
+        """
+        checkpoint = torch.load(path)
+        return self.load_from_checkpoint(checkpoint)
+
+    @abstractmethod
+    def load_from_checkpoint(self, checkpoint: Dict) -> nn.Module:
+        """
+        when using continual_learning, DataDrawer will load the dictionary
+        (containing state dicts and model_meta_data) by calling torch.load(path).
+        you can access this dict from any class that inherits IFreqaiModel by calling
+        get_init_model method.
+        :param checkpoint: dict containing the model & optimizer state dicts,
+            model_meta_data, etc..
+        """
freqtrade/freqai/torch/__init__.py (new empty file)
@@ -865,6 +865,11 @@ def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency:
     print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
     print(table)

+    table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
+    if isinstance(table, str) and len(table) > 0:
+        print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
+        print(table)
+
     if (results.get('results_per_enter_tag') is not None
             or results.get('results_per_buy_tag') is not None):
         # results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
@@ -884,11 +889,6 @@ def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency:
     print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
     print(table)

-    table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
-    if isinstance(table, str) and len(table) > 0:
-        print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
-        print(table)
-
     for period in backtest_breakdown:
         days_breakdown_stats = generate_periodic_breakdown_stats(
             trade_list=results['trades'], period=period)
@@ -917,11 +917,11 @@ def show_backtest_results(config: Config, backtest_stats: Dict):
             strategy, results, stake_currency,
             config.get('backtest_breakdown', []))

-    if len(backtest_stats['strategy']) > 1:
+    if len(backtest_stats['strategy']) > 0:
         # Print Strategy summary table

         table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
-        print(f"{results['backtest_start']} -> {results['backtest_end']} |"
+        print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
               f" Max open trades : {results['max_open_trades']}")
         print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
         print(table)
@@ -9,10 +9,10 @@ from typing import Any, ClassVar, Dict, List, Optional, Sequence, cast

 from sqlalchemy import (Enum, Float, ForeignKey, Integer, ScalarResult, Select, String,
                         UniqueConstraint, desc, func, select)
-from sqlalchemy.orm import Mapped, lazyload, mapped_column, relationship
+from sqlalchemy.orm import Mapped, lazyload, mapped_column, relationship, validates

-from freqtrade.constants import (DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC, NON_OPEN_EXCHANGE_STATES,
-                                 BuySell, LongShort)
+from freqtrade.constants import (CUSTOM_TAG_MAX_LENGTH, DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC,
+                                 NON_OPEN_EXCHANGE_STATES, BuySell, LongShort)
 from freqtrade.enums import ExitType, TradingMode
 from freqtrade.exceptions import DependencyException, OperationalException
 from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, amount_to_contract_precision,
@@ -1259,11 +1259,13 @@ class Trade(ModelBase, LocalTrade):
         Float(), nullable=True, default=0.0)  # type: ignore
     # Lowest price reached
     min_rate: Mapped[Optional[float]] = mapped_column(Float(), nullable=True)  # type: ignore
-    exit_reason: Mapped[Optional[str]] = mapped_column(String(100), nullable=True)  # type: ignore
+    exit_reason: Mapped[Optional[str]] = mapped_column(
+        String(CUSTOM_TAG_MAX_LENGTH), nullable=True)  # type: ignore
     exit_order_status: Mapped[Optional[str]] = mapped_column(
         String(100), nullable=True)  # type: ignore
     strategy: Mapped[Optional[str]] = mapped_column(String(100), nullable=True)  # type: ignore
-    enter_tag: Mapped[Optional[str]] = mapped_column(String(100), nullable=True)  # type: ignore
+    enter_tag: Mapped[Optional[str]] = mapped_column(
+        String(CUSTOM_TAG_MAX_LENGTH), nullable=True)  # type: ignore
     timeframe: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)  # type: ignore

     trading_mode: Mapped[TradingMode] = mapped_column(
@@ -1293,6 +1295,13 @@ class Trade(ModelBase, LocalTrade):
         self.realized_profit = 0
         self.recalc_open_trade_value()

+    @validates('enter_tag', 'exit_reason')
+    def validate_string_len(self, key, value):
+        max_len = getattr(self.__class__, key).prop.columns[0].type.length
+        if value and len(value) > max_len:
+            return value[:max_len]
+        return value
+
     def delete(self) -> None:

         for order in self.orders:
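
The new `@validates` hook means oversized `enter_tag`/`exit_reason` values are silently truncated to the column length instead of failing at the database layer. A self-contained sketch of the same pattern, using a hypothetical `Tag` model in place of `Trade`:

```python
from sqlalchemy import String
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, validates


class Base(DeclarativeBase):
    pass


class Tag(Base):  # hypothetical stand-in for the Trade model
    __tablename__ = "tag"
    id: Mapped[int] = mapped_column(primary_key=True)
    enter_tag: Mapped[str] = mapped_column(String(100))

    @validates("enter_tag")
    def validate_string_len(self, key, value):
        # Read the declared column length and trim anything longer.
        max_len = getattr(self.__class__, key).prop.columns[0].type.length
        if value and len(value) > max_len:
            return value[:max_len]
        return value


tag = Tag(enter_tag="x" * 150)
print(len(tag.enter_tag))  # 100 -- trimmed on assignment, before any flush
```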
@@ -1196,6 +1196,7 @@ class RPC:
         from freqtrade.resolvers.strategy_resolver import StrategyResolver
         strategy = StrategyResolver.load_strategy(config)
         strategy.dp = DataProvider(config, exchange=exchange, pairlists=None)
+        strategy.ft_bot_start()

         df_analyzed = strategy.analyze_ticker(_data[pair], {'pair': pair})
@@ -10,7 +10,7 @@ from typing import Dict, List, Optional, Tuple, Union
 import arrow
 from pandas import DataFrame

-from freqtrade.constants import Config, IntOrInf, ListPairsWithTimeframes
+from freqtrade.constants import CUSTOM_TAG_MAX_LENGTH, Config, IntOrInf, ListPairsWithTimeframes
 from freqtrade.data.dataprovider import DataProvider
 from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, MarketDirection, RunMode,
                              SignalDirection, SignalTagType, SignalType, TradingMode)
@@ -27,7 +27,6 @@ from freqtrade.wallets import Wallets


 logger = logging.getLogger(__name__)
-CUSTOM_EXIT_MAX_LENGTH = 64


 class IStrategy(ABC, HyperStrategyMixin):
@@ -1118,11 +1117,11 @@ class IStrategy(ABC, HyperStrategyMixin):
                 exit_signal = ExitType.CUSTOM_EXIT
                 if isinstance(reason_cust, str):
                     custom_reason = reason_cust
-                    if len(reason_cust) > CUSTOM_EXIT_MAX_LENGTH:
+                    if len(reason_cust) > CUSTOM_TAG_MAX_LENGTH:
                         logger.warning(f'Custom exit reason returned from '
                                        f'custom_exit is too long and was trimmed'
-                                       f'to {CUSTOM_EXIT_MAX_LENGTH} characters.')
-                        custom_reason = reason_cust[:CUSTOM_EXIT_MAX_LENGTH]
+                                       f'to {CUSTOM_TAG_MAX_LENGTH} characters.')
+                        custom_reason = reason_cust[:CUSTOM_TAG_MAX_LENGTH]
                 else:
                     custom_reason = ''
                 if (
@@ -223,6 +223,7 @@ class FreqaiExampleHybridStrategy(IStrategy):
         :param metadata: metadata of current pair
         usage example: dataframe["&-target"] = dataframe["close"].shift(-1) / dataframe["close"]
         """
+        self.freqai.class_names = ["down", "up"]
         dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-50) >
                                               dataframe["close"], 'up', 'down')
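
The added line matters because the PyTorch classifiers size their output layer from `class_names` and use it to map the string labels to integers. The labeling rule itself, extracted into a runnable sketch:

```python
import numpy as np
import pandas as pd


def label_up_or_down(dataframe: pd.DataFrame, horizon: int = 50) -> pd.DataFrame:
    # Same rule as the strategy above: 'up' if the close `horizon` candles
    # ahead is higher than the current close, else 'down'.
    dataframe['&s-up_or_down'] = np.where(
        dataframe["close"].shift(-horizon) > dataframe["close"], 'up', 'down')
    return dataframe


df = label_up_or_down(pd.DataFrame({"close": np.arange(100.0)}), horizon=50)
print(df['&s-up_or_down'].value_counts())  # 50 'up', 50 'down'
```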
@@ -7,10 +7,10 @@
 -r docs/requirements-docs.txt

 coveralls==3.3.1
-ruff==0.0.260
-mypy==1.1.1
-pre-commit==3.2.1
-pytest==7.2.2
+ruff==0.0.261
+mypy==1.2.0
+pre-commit==3.2.2
+pytest==7.3.0
 pytest-asyncio==0.21.0
 pytest-cov==4.0.0
 pytest-mock==3.10.0
@@ -22,11 +22,11 @@ time-machine==2.9.0
 httpx==0.23.3

 # Convert jupyter notebooks to markdown documents
-nbconvert==7.2.10
+nbconvert==7.3.1

 # mypy types
 types-cachetools==5.3.0.5
 types-filelock==3.2.7
 types-requests==2.28.11.17
 types-tabulate==0.9.0.2
-types-python-dateutil==2.8.19.11
+types-python-dateutil==2.8.19.12
@@ -5,5 +5,5 @@
 scipy==1.10.1
 scikit-learn==1.1.3
 scikit-optimize==0.9.0
-filelock==3.10.6
+filelock==3.11.0
 progressbar2==4.2.0
@@ -1,4 +1,4 @@
 # Include all requirements to run the bot.
 -r requirements.txt

-plotly==5.14.0
+plotly==5.14.1
@@ -2,10 +2,10 @@ numpy==1.24.2
 pandas==1.5.3
 pandas-ta==0.3.14b

-ccxt==3.0.50
+ccxt==3.0.59
 cryptography==40.0.1
 aiohttp==3.8.4
-SQLAlchemy==2.0.8
+SQLAlchemy==2.0.9
 python-telegram-bot==13.15
 arrow==1.2.3
 cachetools==4.2.2
@@ -28,7 +28,7 @@ py_find_1st==1.1.5
 # Load ticker files 30% faster
 python-rapidjson==1.10
 # Properly format api responses
-orjson==3.8.9
+orjson==3.8.10

 # Notify systemd
 sdnotify==0.3.2
@@ -50,10 +50,10 @@ prompt-toolkit==3.0.38
 python-dateutil==2.8.2

 #Futures
-schedule==1.1.0
+schedule==1.2.0

 #WS Messages
-websockets==11.0
+websockets==11.0.1
 janus==1.0.0

 ast-comments==1.0.1
setup.sh
@@ -85,7 +85,7 @@ function updateenv() {
     if [[ $REPLY =~ ^[Yy]$ ]]
     then
         REQUIREMENTS_FREQAI="-r requirements-freqai.txt --use-pep517"
-        read -p "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]? "
+        read -p "Do you also want dependencies for freqai-rl or PyTorch (~700mb additional space required) [y/N]? "
         if [[ $REPLY =~ ^[Yy]$ ]]
        then
             REQUIREMENTS_FREQAI="-r requirements-freqai-rl.txt"
@@ -1,5 +1,6 @@
 from copy import deepcopy
 from pathlib import Path
+from typing import Any, Dict
 from unittest.mock import MagicMock

 import pytest
@@ -85,6 +86,22 @@ def make_rl_config(conf):
     return conf


+def mock_pytorch_mlp_model_training_parameters() -> Dict[str, Any]:
+    return {
+        "learning_rate": 3e-4,
+        "trainer_kwargs": {
+            "max_iters": 1,
+            "batch_size": 64,
+            "max_n_eval_batches": 1,
+        },
+        "model_kwargs": {
+            "hidden_dim": 32,
+            "dropout_percent": 0.2,
+            "n_layer": 1,
+        }
+    }
+
+
 def get_patched_data_kitchen(mocker, freqaiconf):
     dk = FreqaiDataKitchen(freqaiconf)
     return dk
@ -119,6 +136,7 @@ def make_unfiltered_dataframe(mocker, freqai_conf):
|
|||||||
freqai = strategy.freqai
|
freqai = strategy.freqai
|
||||||
freqai.live = True
|
freqai.live = True
|
||||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||||
|
freqai.dk.live = True
|
||||||
freqai.dk.pair = "ADA/BTC"
|
freqai.dk.pair = "ADA/BTC"
|
||||||
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||||
freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk)
|
freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk)
|
||||||
@ -152,6 +170,7 @@ def make_data_dictionary(mocker, freqai_conf):
|
|||||||
freqai = strategy.freqai
|
freqai = strategy.freqai
|
||||||
freqai.live = True
|
freqai.live = True
|
||||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||||
|
freqai.dk.live = True
|
||||||
freqai.dk.pair = "ADA/BTC"
|
freqai.dk.pair = "ADA/BTC"
|
||||||
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||||
freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk)
|
freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk)
|
||||||
|
@ -19,6 +19,7 @@ def test_update_historic_data(mocker, freqai_conf):
|
|||||||
freqai = strategy.freqai
|
freqai = strategy.freqai
|
||||||
freqai.live = True
|
freqai.live = True
|
||||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||||
|
freqai.dk.live = True
|
||||||
timerange = TimeRange.parse_timerange("20180110-20180114")
|
timerange = TimeRange.parse_timerange("20180110-20180114")
|
||||||
|
|
||||||
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
|
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
|
||||||
@@ -41,6 +42,7 @@ def test_load_all_pairs_histories(mocker, freqai_conf):
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180114")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
 
@@ -60,6 +62,7 @@ def test_get_base_and_corr_dataframes(mocker, freqai_conf):
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180114")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
     sub_timerange = TimeRange.parse_timerange("20180111-20180114")
@@ -87,6 +90,7 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf):
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180114")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
     sub_timerange = TimeRange.parse_timerange("20180111-20180114")
@@ -103,8 +107,9 @@ def test_get_timerange_from_live_historic_predictions(mocker, freqai_conf):
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
     freqai = strategy.freqai
-    freqai.live = True
+    freqai.live = False
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = False
     timerange = TimeRange.parse_timerange("20180126-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
     sub_timerange = TimeRange.parse_timerange("20180128-20180130")
@@ -180,6 +180,7 @@ def test_get_full_model_path(mocker, freqai_conf, model):
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
 
@@ -15,7 +15,8 @@ from freqtrade.optimize.backtesting import Backtesting
 from freqtrade.persistence import Trade
 from freqtrade.plugins.pairlistmanager import PairListManager
 from tests.conftest import EXMS, create_mock_trades, get_patched_exchange, log_has_re
-from tests.freqai.conftest import get_patched_freqai_strategy, make_rl_config
+from tests.freqai.conftest import (get_patched_freqai_strategy, make_rl_config,
+                                   mock_pytorch_mlp_model_training_parameters)
 
 
 def is_py11() -> bool:
@@ -34,13 +35,14 @@ def is_mac() -> bool:
 
 def can_run_model(model: str) -> None:
     if (is_arm() or is_py11()) and "Catboost" in model:
-        pytest.skip("CatBoost is not supported on ARM")
+        pytest.skip("CatBoost is not supported on ARM.")
 
-    if is_mac() and not is_arm() and 'Reinforcement' in model:
-        pytest.skip("Reinforcement learning module not available on intel based Mac OS")
+    is_pytorch_model = 'Reinforcement' in model or 'PyTorch' in model
+    if is_pytorch_model and is_mac() and not is_arm():
+        pytest.skip("Reinforcement learning / PyTorch module not available on intel based Mac OS.")
 
-    if is_py11() and 'Reinforcement' in model:
-        pytest.skip("Reinforcement learning currently not available on python 3.11.")
+    if is_pytorch_model and is_py11():
+        pytest.skip("Reinforcement learning / PyTorch currently not available on python 3.11.")
 
 
 @pytest.mark.parametrize('model, pca, dbscan, float32, can_short, shuffle, buffer', [
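With the platform checks consolidated, each test can gate itself with a single call instead of duplicating the ARM / Python 3.11 / intel-Mac conditions; the later hunks do exactly that. Usage sketch (the test name is hypothetical):

    # pytest.skip() aborts the test immediately, so one guard call at the top
    # replaces the duplicated platform checks in each test body.
    def test_pytorch_model_example(mocker, freqai_conf):
        can_run_model('PyTorchMLPRegressor')
        # ...rest of the test only executes on supported platforms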
@@ -48,11 +50,12 @@ def can_run_model(model: str) -> None:
     ('XGBoostRegressor', False, True, False, True, False, 10),
     ('XGBoostRFRegressor', False, False, False, True, False, 0),
     ('CatboostRegressor', False, False, False, True, True, 0),
+    ('PyTorchMLPRegressor', False, False, False, True, False, 0),
     ('ReinforcementLearner', False, True, False, True, False, 0),
     ('ReinforcementLearner_multiproc', False, False, False, True, False, 0),
     ('ReinforcementLearner_test_3ac', False, False, False, False, False, 0),
     ('ReinforcementLearner_test_3ac', False, False, False, True, False, 0),
-    ('ReinforcementLearner_test_4ac', False, False, False, True, False, 0)
+    ('ReinforcementLearner_test_4ac', False, False, False, True, False, 0),
     ])
 def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
                                                dbscan, float32, can_short, shuffle, buffer):
@@ -79,6 +82,11 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
         freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models")
         freqai_conf["freqai"]["rl_config"]["drop_ohlc_from_features"] = True
 
+    if 'PyTorchMLPRegressor' in model:
+        model_save_ext = 'zip'
+        pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters()
+        freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp)
+
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
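PyTorch models serialize to zip archives rather than joblib files, which is why this branch overrides `model_save_ext`. Assumed from context (the assertion itself is not shown in this hunk), the test presumably checks the saved artifact roughly like:

    # Sketch only; names taken from the surrounding test code.
    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.{model_save_ext}").exists()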
@@ -87,6 +95,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
     freqai.live = True
     freqai.can_short = can_short
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     freqai.dk.set_paths('ADA/BTC', 10000)
     timerange = TimeRange.parse_timerange("20180110-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
@@ -122,8 +131,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
     ('CatboostClassifierMultiTarget', "freqai_test_multimodel_classifier_strat")
     ])
 def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, strat):
-    if (is_arm() or is_py11()) and 'Catboost' in model:
-        pytest.skip("CatBoost is not supported on ARM")
+    can_run_model(model)
 
     freqai_conf.update({"timerange": "20180110-20180130"})
     freqai_conf.update({"strategy": strat})
@@ -135,6 +143,7 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, s
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
 
@@ -162,10 +171,10 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, s
     'CatboostClassifier',
     'XGBoostClassifier',
     'XGBoostRFClassifier',
+    'PyTorchMLPClassifier',
     ])
 def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
-    if (is_arm() or is_py11()) and model == 'CatboostClassifier':
-        pytest.skip("CatBoost is not supported on ARM")
+    can_run_model(model)
 
     freqai_conf.update({"freqaimodel": model})
     freqai_conf.update({"strategy": "freqai_test_classifier"})
@@ -178,6 +187,7 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
 
@@ -190,7 +200,20 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
     freqai.extract_data_and_train_model(new_timerange, "ADA/BTC",
                                         strategy, freqai.dk, data_load_timerange)
 
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists()
+    if 'PyTorchMLPClassifier' in model:
+        pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters()
+        freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp)
+
+    if freqai.dd.model_type == 'joblib':
+        model_file_extension = ".joblib"
+    elif freqai.dd.model_type == "pytorch":
+        model_file_extension = ".zip"
+    else:
+        raise Exception(f"Unsupported model type: {freqai.dd.model_type},"
+                        f" can't assign model_file_extension")
+
+    assert Path(freqai.dk.data_path /
+                f"{freqai.dk.model_filename}_model{model_file_extension}").exists()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").exists()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").exists()
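The model-type dispatch is the substantive change here: the data drawer now records a `model_type`, and the expected artifact suffix follows from it. A table-driven sketch of the same dispatch (not the committed code) that fails just as loudly on unknown types:

    # Sketch: mapping-based equivalent of the if/elif chain above.
    MODEL_FILE_EXTENSIONS = {"joblib": ".joblib", "pytorch": ".zip"}

    try:
        model_file_extension = MODEL_FILE_EXTENSIONS[freqai.dd.model_type]
    except KeyError:
        raise Exception(f"Unsupported model type: {freqai.dd.model_type},"
                        f" can't assign model_file_extension")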
@@ -204,10 +227,12 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
     ("LightGBMRegressor", 2, "freqai_test_strat"),
     ("XGBoostRegressor", 2, "freqai_test_strat"),
     ("CatboostRegressor", 2, "freqai_test_strat"),
+    ("PyTorchMLPRegressor", 2, "freqai_test_strat"),
     ("ReinforcementLearner", 3, "freqai_rl_test_strat"),
     ("XGBoostClassifier", 2, "freqai_test_classifier"),
     ("LightGBMClassifier", 2, "freqai_test_classifier"),
-    ("CatboostClassifier", 2, "freqai_test_classifier")
+    ("CatboostClassifier", 2, "freqai_test_classifier"),
+    ("PyTorchMLPClassifier", 2, "freqai_test_classifier")
     ],
 )
 def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog):
@@ -228,6 +253,10 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
     if 'test_4ac' in model:
         freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models")
 
+    if 'PyTorchMLP' in model:
+        pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters()
+        freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp)
+
     freqai_conf.get("freqai", {}).get("feature_parameters", {}).update(
         {"indicator_periods_candles": [2]})
 
@@ -371,6 +400,9 @@ def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog):
     sub_timerange = TimeRange.parse_timerange("20180129-20180130")
     corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk)
     df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC")
+    df = strategy.set_freqai_targets(df.copy(), metadata={"pair": "LTC/BTC"})
+    df = freqai.dk.remove_special_chars_from_feature_names(df)
+    freqai.dk.get_unique_classes_from_labels(df)
     freqai.dk.pair = "ADA/BTC"
     freqai.dk.full_df = df.fillna(0)
     freqai.dk.full_df
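These three added lines matter for the classifier path: targets must exist and be sanitised before `fit_live_predictions` can derive label statistics, and `get_unique_classes_from_labels` records the class values found in the `&`-prefixed label columns. A rough sketch of that extraction idea (not the library implementation):

    import pandas as pd

    # Label columns in FreqAI carry the '&' prefix; the unique values found in
    # them become the classifier's known classes.
    df = pd.DataFrame({"&s-up_or_down": ["up", "down", "up"], "close": [1.0, 2.0, 3.0]})
    label_cols = [col for col in df.columns if col.startswith("&")]
    unique_classes = {col: df[col].dropna().unique().tolist() for col in label_cols}
    assert unique_classes == {"&s-up_or_down": ["up", "down"]}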
@@ -394,6 +426,7 @@ def test_principal_component_analysis(mocker, freqai_conf):
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
 
@@ -425,10 +458,12 @@ def test_plot_feature_importance(mocker, freqai_conf):
     freqai = strategy.freqai
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.live = True
     timerange = TimeRange.parse_timerange("20180110-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
 
-    freqai.dd.pair_dict = MagicMock()
+    freqai.dd.pair_dict = {"ADA/BTC": {"model_filename": "fake_name",
+                                       "trained_timestamp": 1, "data_path": "", "extras": {}}}
 
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")
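Swapping `MagicMock()` for a literal dict is a small but meaningful hardening: a MagicMock answers every key lookup with another mock, so code reading `pair_dict["ADA/BTC"]["model_filename"]` would silently "succeed" even when the schema is wrong. With a real dict, a mismatch fails the test. Illustration (sketch):

    from unittest.mock import MagicMock

    pair_dict = MagicMock()
    value = pair_dict["ADA/BTC"]["model_filename"]  # no error: returns another MagicMock

    real = {"ADA/BTC": {"model_filename": "fake_name",
                        "trained_timestamp": 1, "data_path": "", "extras": {}}}
    assert real["ADA/BTC"]["model_filename"] == "fake_name"
    # real["ADA/BTC"]["missing_key"] would raise KeyError, as a test should.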
@@ -6,7 +6,7 @@ import arrow
 import pytest
 from sqlalchemy import select
 
-from freqtrade.constants import DATETIME_PRINT_FORMAT
+from freqtrade.constants import CUSTOM_TAG_MAX_LENGTH, DATETIME_PRINT_FORMAT
 from freqtrade.enums import TradingMode
 from freqtrade.exceptions import DependencyException
 from freqtrade.persistence import LocalTrade, Order, Trade, init_db
@@ -2037,6 +2037,7 @@ def test_Trade_object_idem():
     'get_mix_tag_performance',
     'get_trading_volume',
     'from_json',
+    'validate_string_len',
     )
 EXCLUDES2 = ('trades', 'trades_open', 'bt_trades_open_pp', 'bt_open_open_trade_count',
              'total_profit')
@@ -2055,6 +2056,31 @@ def test_Trade_object_idem():
         assert item in trade
 
 
+@pytest.mark.usefixtures("init_persistence")
+def test_trade_truncates_string_fields():
+    trade = Trade(
+        pair='ADA/USDT',
+        stake_amount=20.0,
+        amount=30.0,
+        open_rate=2.0,
+        open_date=datetime.utcnow() - timedelta(minutes=20),
+        fee_open=0.001,
+        fee_close=0.001,
+        exchange='binance',
+        leverage=1.0,
+        trading_mode='futures',
+        enter_tag='a' * CUSTOM_TAG_MAX_LENGTH * 2,
+        exit_reason='b' * CUSTOM_TAG_MAX_LENGTH * 2,
+    )
+    Trade.session.add(trade)
+    Trade.commit()
+
+    trade1 = Trade.session.scalars(select(Trade)).first()
+
+    assert trade1.enter_tag == 'a' * CUSTOM_TAG_MAX_LENGTH
+    assert trade1.exit_reason == 'b' * CUSTOM_TAG_MAX_LENGTH
+
+
 def test_recalc_trade_from_orders(fee):
 
     o1_amount = 100
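The new test relies on the `validate_string_len` member excluded above: on assignment, `enter_tag` and `exit_reason` are clipped to `CUSTOM_TAG_MAX_LENGTH` before they reach the database. A plausible shape for that validator, assuming SQLAlchemy's `@validates` hook (a sketch of the mechanism, not the committed implementation):

    from sqlalchemy.orm import validates

    CUSTOM_TAG_MAX_LENGTH = 255  # assumed value; the real constant lives in freqtrade.constants

    class TradeSketch:
        @validates('enter_tag', 'exit_reason')
        def validate_string_len(self, key, value):
            # Truncate overly long user-supplied tags instead of failing the insert.
            if value and len(value) > CUSTOM_TAG_MAX_LENGTH:
                return value[:CUSTOM_TAG_MAX_LENGTH]
            return value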
@@ -1405,10 +1405,10 @@ def test_api_pair_candles(botclient, ohlcv_history):
     ])
 
 
-def test_api_pair_history(botclient, ohlcv_history):
+def test_api_pair_history(botclient, mocker):
     ftbot, client = botclient
     timeframe = '5m'
+    lfm = mocker.patch('freqtrade.strategy.interface.IStrategy.load_freqAI_model')
     # No pair
     rc = client_get(client,
                     f"{BASE_URI}/pair_history?timeframe={timeframe}"
@@ -1442,6 +1442,7 @@ def test_api_pair_history(botclient, ohlcv_history):
     assert len(rc.json()['data']) == rc.json()['length']
     assert 'columns' in rc.json()
     assert 'data' in rc.json()
+    assert lfm.call_count == 1
     assert rc.json()['pair'] == 'UNITTEST/BTC'
     assert rc.json()['strategy'] == CURRENT_TEST_STRATEGY
     assert rc.json()['data_start'] == '2018-01-11 00:00:00+00:00'
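The patch from the previous hunk stubs out FreqAI model loading so the API test never touches a real model on disk, and the assertion here proves the endpoint went through the strategy's FreqAI hook exactly once. Generic pattern sketch (pytest-mock; the test name is hypothetical):

    # Stub the expensive hook, then verify the code under test exercised it.
    def test_endpoint_uses_freqai_hook(mocker):
        lfm = mocker.patch('freqtrade.strategy.interface.IStrategy.load_freqAI_model')
        # ...drive the /pair_history endpoint here...
        assert lfm.call_count == 1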
@@ -2241,8 +2241,9 @@ def test_send_msg_buy_notification_no_fiat(
     ('Short', 'short_signal_01', 2.0),
 ])
 def test_send_msg_sell_notification_no_fiat(
-        default_conf, mocker, direction, enter_signal, leverage) -> None:
+        default_conf, mocker, direction, enter_signal, leverage, time_machine) -> None:
     del default_conf['fiat_display_currency']
+    time_machine.move_to('2022-05-02 00:00:00 +00:00', tick=False)
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
     telegram.send_msg({
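Freezing the clock makes duration-dependent message text deterministic; with `tick=False` time stands still for the rest of the test. A minimal standalone sketch, assuming the `time-machine` package, which provides this pytest fixture:

    from datetime import datetime, timezone

    def test_frozen_clock(time_machine):
        time_machine.move_to('2022-05-02 00:00:00 +00:00', tick=False)
        assert datetime.now(timezone.utc) == datetime(2022, 5, 2, tzinfo=timezone.utc)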
@@ -82,7 +82,7 @@ class freqai_test_classifier(IStrategy):
         return dataframe
 
     def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
+        self.freqai.class_names = ["down", "up"]
         dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
                                               dataframe["close"], 'up', 'down')
 
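Setting `self.freqai.class_names` inside `set_freqai_targets` tells the classifier which label values to expect, keeping the class order stable across training runs. A toy version of the labelling rule itself; the real code uses `shift(-100)`, a 100-candle lookahead, so tail rows compare against NaN and fall into the 'down' branch:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"close": [1.0, 2.0, 3.0]})
    df['&s-up_or_down'] = np.where(df["close"].shift(-1) > df["close"], 'up', 'down')
    assert list(df['&s-up_or_down']) == ['up', 'up', 'down']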
@@ -9,6 +9,7 @@ import pytest
 from pandas import DataFrame
 
 from freqtrade.configuration import TimeRange
+from freqtrade.constants import CUSTOM_TAG_MAX_LENGTH
 from freqtrade.data.dataprovider import DataProvider
 from freqtrade.data.history import load_data
 from freqtrade.enums import ExitCheckTuple, ExitType, HyperoptState, SignalDirection
@@ -529,13 +530,13 @@ def test_custom_exit(default_conf, fee, caplog) -> None:
     assert res[0].exit_reason == 'hello world'
 
     caplog.clear()
-    strategy.custom_exit = MagicMock(return_value='h' * 100)
+    strategy.custom_exit = MagicMock(return_value='h' * CUSTOM_TAG_MAX_LENGTH * 2)
     res = strategy.should_exit(trade, 1, now,
                                enter=False, exit_=False,
                                low=None, high=None)
     assert res[0].exit_type == ExitType.CUSTOM_EXIT
     assert res[0].exit_flag is True
-    assert res[0].exit_reason == 'h' * 64
+    assert res[0].exit_reason == 'h' * (CUSTOM_TAG_MAX_LENGTH)
     assert log_has_re('Custom exit reason returned from custom_exit is too long.*', caplog)
 
 
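Deriving both the overlong input and the expected result from `CUSTOM_TAG_MAX_LENGTH`, instead of the magic numbers 100 and 64, keeps the test valid if the constant ever changes. A sketch (assumed shape, not the committed code) of the truncation the assertions exercise:

    import logging

    logger = logging.getLogger(__name__)
    CUSTOM_TAG_MAX_LENGTH = 255  # assumed value of the freqtrade constant

    def clip_exit_reason(exit_reason: str) -> str:
        # Truncate an overlong custom exit reason, warning as the test expects.
        if exit_reason and len(exit_reason) > CUSTOM_TAG_MAX_LENGTH:
            logger.warning('Custom exit reason returned from custom_exit is too long, truncating.')
            exit_reason = exit_reason[:CUSTOM_TAG_MAX_LENGTH]
        return exit_reason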
@@ -10,6 +10,8 @@ from freqtrade.exceptions import OperationalException
 
 def test_parse_timerange_incorrect():
 
+    timerange = TimeRange.parse_timerange('')
+    assert timerange == TimeRange(None, None, 0, 0)
     timerange = TimeRange.parse_timerange('20100522-')
     assert TimeRange('date', None, 1274486400, 0) == timerange
     assert timerange.timerange_str == '20100522-'
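The added assertion pins down the degenerate case: an empty string yields a TimeRange with no bounds at all. For contrast, a sketch of the bounded case, where both ends parse to UTC epoch seconds (the start timestamp 1274486400, 2010-05-22 00:00:00 UTC, comes from the hunk above; the stop value is assumed from the one-day offset):

    tr = TimeRange.parse_timerange('20100522-20100523')
    assert tr.startts == 1274486400
    assert tr.stopts == 1274486400 + 86400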