Compare commits: develop ... dependabot (1 commit, SHA 40e4035a65)

.github/workflows/ci.yml (vendored, 20 changes)
@@ -16,8 +16,7 @@ on:
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
-permissions:
-  repository-projects: read
 jobs:
   build_linux:
 
@@ -25,7 +24,7 @@ jobs:
     strategy:
       matrix:
         os: [ ubuntu-20.04, ubuntu-22.04 ]
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.8", "3.9", "3.10"]
 
     steps:
     - uses: actions/checkout@v3
@@ -116,7 +115,7 @@ jobs:
     strategy:
       matrix:
        os: [ macos-latest ]
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.8", "3.9", "3.10"]
 
     steps:
     - uses: actions/checkout@v3
@@ -213,7 +212,7 @@ jobs:
     strategy:
       matrix:
         os: [ windows-latest ]
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.8", "3.9", "3.10"]
 
     steps:
     - uses: actions/checkout@v3
@@ -322,6 +321,7 @@ jobs:
   build_linux_online:
     # Run pytest with "live" checks
     runs-on: ubuntu-22.04
+    # permissions:
     steps:
     - uses: actions/checkout@v3
 
@@ -425,7 +425,7 @@ jobs:
         python setup.py sdist bdist_wheel
 
     - name: Publish to PyPI (Test)
-      uses: pypa/gh-action-pypi-publish@v1.8.5
+      uses: pypa/gh-action-pypi-publish@v1.6.4
      if: (github.event_name == 'release')
      with:
        user: __token__
@@ -433,7 +433,7 @@ jobs:
        repository_url: https://test.pypi.org/legacy/
 
    - name: Publish to PyPI
-      uses: pypa/gh-action-pypi-publish@v1.8.5
+      uses: pypa/gh-action-pypi-publish@v1.6.4
      if: (github.event_name == 'release')
      with:
        user: __token__
@@ -466,13 +466,12 @@ jobs:
 
    - name: Build and test and push docker images
      env:
+        IMAGE_NAME: freqtradeorg/freqtrade
        BRANCH_NAME: ${{ steps.extract_branch.outputs.branch }}
      run: |
        build_helpers/publish_docker_multi.sh
 
  deploy_arm:
-    permissions:
-      packages: write
    needs: [ deploy ]
    # Only run on 64bit machines
    runs-on: [self-hosted, linux, ARM64]
@@ -495,9 +494,8 @@ jobs:
 
    - name: Build and test and push docker images
      env:
+        IMAGE_NAME: freqtradeorg/freqtrade
        BRANCH_NAME: ${{ steps.extract_branch.outputs.branch }}
-        GHCR_USERNAME: ${{ github.actor }}
-        GHCR_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: |
        build_helpers/publish_docker_arm64.sh
 
.pre-commit-config.yaml

@@ -8,17 +8,16 @@ repos:
         # stages: [push]
 
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.0.1"
+    rev: "v0.991"
     hooks:
       - id: mypy
         exclude: build_helpers
         additional_dependencies:
-          - types-cachetools==5.3.0.5
+          - types-cachetools==5.3.0.0
           - types-filelock==3.2.7
-          - types-requests==2.28.11.17
+          - types-requests==2.28.11.13
-          - types-tabulate==0.9.0.2
+          - types-tabulate==0.9.0.0
-          - types-python-dateutil==2.8.19.12
+          - types-python-dateutil==2.8.19.6
-          - SQLAlchemy==2.0.9
         # stages: [push]
 
   - repo: https://github.com/pycqa/isort
@@ -30,7 +29,7 @@ repos:
 
   - repo: https://github.com/charliermarsh/ruff-pre-commit
     # Ruff version.
-    rev: 'v0.0.255'
+    rev: 'v0.0.251'
     hooks:
       - id: ruff
 
Dockerfile

@@ -1,4 +1,4 @@
-FROM python:3.10.11-slim-bullseye as base
+FROM python:3.10.10-slim-bullseye as base
 
 # Setup env
 ENV LANG C.UTF-8
build_helpers/install_ta-lib.sh

@@ -8,8 +8,8 @@ if [ -n "$2" ] || [ ! -f "${INSTALL_LOC}/lib/libta_lib.a" ]; then
     tar zxvf ta-lib-0.4.0-src.tar.gz
     cd ta-lib \
     && sed -i.bak "s|0.00000001|0.000000000000000001 |g" src/ta_func/ta_utility.h \
-    && curl 'https://raw.githubusercontent.com/gcc-mirror/gcc/master/config.guess' -o config.guess \
+    && curl 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' -o config.guess \
-    && curl 'https://raw.githubusercontent.com/gcc-mirror/gcc/master/config.sub' -o config.sub \
+    && curl 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' -o config.sub \
     && ./configure --prefix=${INSTALL_LOC}/ \
     && make
     if [ $? -ne 0 ]; then
@@ -8,17 +8,12 @@ import yaml
 
 pre_commit_file = Path('.pre-commit-config.yaml')
 require_dev = Path('requirements-dev.txt')
-require = Path('requirements.txt')
 
 with require_dev.open('r') as rfile:
     requirements = rfile.readlines()
 
-with require.open('r') as rfile:
-    requirements.extend(rfile.readlines())
-
 # Extract types only
-type_reqs = [r.strip('\n') for r in requirements if r.startswith(
-    'types-') or r.startswith('SQLAlchemy')]
+type_reqs = [r.strip('\n') for r in requirements if r.startswith('types-')]
 
 with pre_commit_file.open('r') as file:
     f = yaml.load(file, Loader=yaml.FullLoader)
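For context on the develop-side filter removed above: it keeps both `types-*` stubs and the `SQLAlchemy` pin. A minimal, self-contained illustration of that list comprehension (the sample requirement lines here are hypothetical, not read from the real files):

```python
# Illustration of the develop-side extraction logic, on made-up input lines.
requirements = [
    "pytest==7.3.1\n",
    "types-cachetools==5.3.0.5\n",
    "types-requests==2.28.11.17\n",
    "SQLAlchemy==2.0.9\n",
]

# Keep typing stubs plus the SQLAlchemy pin (the develop-side behaviour).
type_reqs = [r.strip('\n') for r in requirements
             if r.startswith('types-') or r.startswith('SQLAlchemy')]

print(type_reqs)
# ['types-cachetools==5.3.0.5', 'types-requests==2.28.11.17', 'SQLAlchemy==2.0.9']
```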
build_helpers/publish_docker_arm64.sh

@@ -3,22 +3,18 @@
 # Use BuildKit, otherwise building on ARM fails
 export DOCKER_BUILDKIT=1
 
-IMAGE_NAME=freqtradeorg/freqtrade
-CACHE_IMAGE=freqtradeorg/freqtrade_cache
-GHCR_IMAGE_NAME=ghcr.io/freqtrade/freqtrade
-
 # Replace / with _ to create a valid tag
 TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
 TAG_PLOT=${TAG}_plot
 TAG_FREQAI=${TAG}_freqai
 TAG_FREQAI_RL=${TAG_FREQAI}rl
-TAG_FREQAI_TORCH=${TAG_FREQAI}torch
 TAG_PI="${TAG}_pi"
 
 TAG_ARM=${TAG}_arm
 TAG_PLOT_ARM=${TAG_PLOT}_arm
 TAG_FREQAI_ARM=${TAG_FREQAI}_arm
 TAG_FREQAI_RL_ARM=${TAG_FREQAI_RL}_arm
+CACHE_IMAGE=freqtradeorg/freqtrade_cache
 
 echo "Running for ${TAG}"
 
@@ -42,13 +38,13 @@ if [ $? -ne 0 ]; then
     echo "failed building multiarch images"
     return 1
 fi
 
-docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot .
-docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai .
-docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_FREQAI_ARM} -t freqtrade:${TAG_FREQAI_RL_ARM} -f docker/Dockerfile.freqai_rl .
 # Tag image for upload and next build step
 docker tag freqtrade:$TAG_ARM ${CACHE_IMAGE}:$TAG_ARM
 
+docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot .
+docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai .
+docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_RL_ARM} -f docker/Dockerfile.freqai_rl .
 docker tag freqtrade:$TAG_PLOT_ARM ${CACHE_IMAGE}:$TAG_PLOT_ARM
 docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM
 docker tag freqtrade:$TAG_FREQAI_RL_ARM ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM
@@ -63,6 +59,7 @@ fi
 
 docker images
 
+# docker push ${IMAGE_NAME}
 docker push ${CACHE_IMAGE}:$TAG_PLOT_ARM
 docker push ${CACHE_IMAGE}:$TAG_FREQAI_ARM
 docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM
@@ -85,35 +82,14 @@ docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI}
 docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM}
 docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_RL}
 
-# Create special Torch tag - which is identical to the RL tag.
-docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_TORCH} ${CACHE_IMAGE}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM}
-docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_TORCH}
-
-# copy images to ghcr.io
-
-alias crane="docker run --rm -i -v $(pwd)/.crane:/home/nonroot/.docker/ gcr.io/go-containerregistry/crane"
-mkdir .crane
-chmod a+rwx .crane
-
-echo "${GHCR_TOKEN}" | crane auth login ghcr.io -u "${GHCR_USERNAME}" --password-stdin
-
-crane copy ${IMAGE_NAME}:${TAG_FREQAI_RL} ${GHCR_IMAGE_NAME}:${TAG_FREQAI_RL}
-crane copy ${IMAGE_NAME}:${TAG_FREQAI_RL} ${GHCR_IMAGE_NAME}:${TAG_FREQAI_TORCH}
-crane copy ${IMAGE_NAME}:${TAG_FREQAI} ${GHCR_IMAGE_NAME}:${TAG_FREQAI}
-crane copy ${IMAGE_NAME}:${TAG_PLOT} ${GHCR_IMAGE_NAME}:${TAG_PLOT}
-crane copy ${IMAGE_NAME}:${TAG} ${GHCR_IMAGE_NAME}:${TAG}
-
 # Tag as latest for develop builds
 if [ "${TAG}" = "develop" ]; then
     echo 'Tagging image as latest'
     docker manifest create ${IMAGE_NAME}:latest ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG}
     docker manifest push -p ${IMAGE_NAME}:latest
-
-    crane copy ${IMAGE_NAME}:latest ${GHCR_IMAGE_NAME}:latest
 fi
 
 docker images
-rm -rf .crane
 
 # Cleanup old images from arm64 node.
 docker image prune -a --force --filter "until=24h"
build_helpers/publish_docker_multi.sh

@@ -2,8 +2,6 @@
 
 # The below assumes a correctly setup docker buildx environment
 
-IMAGE_NAME=freqtradeorg/freqtrade
-CACHE_IMAGE=freqtradeorg/freqtrade_cache
 # Replace / with _ to create a valid tag
 TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
 TAG_PLOT=${TAG}_plot
@@ -13,6 +11,7 @@ TAG_PI="${TAG}_pi"
 
 PI_PLATFORM="linux/arm/v7"
 echo "Running for ${TAG}"
+CACHE_IMAGE=freqtradeorg/freqtrade_cache
 CACHE_TAG=${CACHE_IMAGE}:${TAG_PI}_cache
 
 # Add commit and commit_message to docker container
@@ -58,9 +57,9 @@ fi
 # Tag image for upload and next build step
 docker tag freqtrade:$TAG ${CACHE_IMAGE}:$TAG
 
-docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot .
+docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot .
-docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai .
+docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai .
-docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_FREQAI} -t freqtrade:${TAG_FREQAI_RL} -f docker/Dockerfile.freqai_rl .
+docker build --cache-from freqtrade:${TAG_FREQAI} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_FREQAI} -t freqtrade:${TAG_FREQAI_RL} -f docker/Dockerfile.freqai_rl .
 
 docker tag freqtrade:$TAG_PLOT ${CACHE_IMAGE}:$TAG_PLOT
 docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI
Binary file not shown (image removed; before: 18 KiB).
@@ -274,20 +274,19 @@ A backtesting result will look like that:
 | XRP/BTC | 35 | 0.66 | 22.96 | 0.00114897 | 11.48 | 3:49:00 | 12 0 23 34.3 |
 | ZEC/BTC | 22 | -0.46 | -10.18 | -0.00050971 | -5.09 | 2:22:00 | 7 0 15 31.8 |
 | TOTAL | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 0 243 43.4 |
-====================================================== LEFT OPEN TRADES REPORT ======================================================
-| Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Win Draw Loss Win% |
-|:---------|---------:|---------------:|---------------:|-----------------:|---------------:|:---------------|--------------------:|
-| ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 0 0 100 |
-| LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 0 0 100 |
-| TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 0 0 100 |
-==================== EXIT REASON STATS ====================
+========================================================= EXIT REASON STATS ==========================================================
 | Exit Reason | Exits | Wins | Draws | Losses |
 |:-------------------|--------:|------:|-------:|--------:|
 | trailing_stop_loss | 205 | 150 | 0 | 55 |
 | stop_loss | 166 | 0 | 0 | 166 |
 | exit_signal | 56 | 36 | 0 | 20 |
 | force_exit | 2 | 0 | 0 | 2 |
+====================================================== LEFT OPEN TRADES REPORT ======================================================
+| Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Win Draw Loss Win% |
+|:---------|---------:|---------------:|---------------:|-----------------:|---------------:|:---------------|--------------------:|
+| ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 0 0 100 |
+| LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 0 0 100 |
+| TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 0 0 100 |
 ================== SUMMARY METRICS ==================
 | Metric | Value |
 |-----------------------------+---------------------|
@@ -12,9 +12,6 @@ This page provides you some basic concepts on how Freqtrade works and operates.
 * **Indicators**: Technical indicators (SMA, EMA, RSI, ...).
 * **Limit order**: Limit orders which execute at the defined limit price or better.
 * **Market order**: Guaranteed to fill, may move price depending on the order size.
-* **Current Profit**: Currently pending (unrealized) profit for this trade. This is mainly used throughout the bot and UI.
-* **Realized Profit**: Already realized profit. Only relevant in combination with [partial exits](strategy-callbacks.md#adjust-trade-position) - which also explains the calculation logic for this.
-* **Total Profit**: Combined realized and unrealized profit. The relative number (%) is calculated against the total investment in this trade.
 
 ## Fee handling
 
@@ -60,10 +57,10 @@ This loop will be repeated again and again until the bot is stopped.
 
 * Load historic data for configured pairlist.
 * Calls `bot_start()` once.
+* Calls `bot_loop_start()` once.
 * Calculate indicators (calls `populate_indicators()` once per pair).
 * Calculate entry / exit signals (calls `populate_entry_trend()` and `populate_exit_trend()` once per pair).
 * Loops per candle simulating entry and exit points.
-  * Calls `bot_loop_start()` strategy callback.
 * Check for Order timeouts, either via the `unfilledtimeout` configuration, or via `check_entry_timeout()` / `check_exit_timeout()` strategy callbacks.
 * Calls `adjust_entry_price()` strategy callback for open entry orders.
 * Check for trade entry signals (`enter_long` / `enter_short` columns).
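The hooks named in the backtesting loop above map directly onto strategy methods. A minimal sketch of a strategy exposing them, in invocation order; `IStrategy` is freqtrade's strategy base class, and the bodies here are placeholders, not the library's implementation:

```python
# Sketch only: the callbacks named in the backtesting loop above.
from freqtrade.strategy import IStrategy


class SketchStrategy(IStrategy):
    timeframe = "1h"

    def bot_start(self, **kwargs) -> None:
        # Called once after historic data is loaded.
        pass

    def bot_loop_start(self, **kwargs) -> None:
        # develop side: called once per candle during backtesting;
        # dependabot side: called once up front.
        pass

    def populate_indicators(self, dataframe, metadata):
        return dataframe  # called once per pair

    def populate_entry_trend(self, dataframe, metadata):
        return dataframe  # entry signals, once per pair

    def populate_exit_trend(self, dataframe, metadata):
        return dataframe  # exit signals, once per pair
```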
@@ -74,8 +74,3 @@ Webhook terminology changed from "sell" to "exit", and from "buy" to "entry", re
 * `webhooksell`, `webhookexit` -> `exit`
 * `webhooksellfill`, `webhookexitfill` -> `exit_fill`
 * `webhooksellcancel`, `webhookexitcancel` -> `exit_cancel`
-
-## Removal of `populate_any_indicators`
-
-Version 2023.3 saw the removal of `populate_any_indicators` in favor of split methods for feature engineering and targets. Please read the [migration document](strategy_migration.md#freqai-strategy) for full details.
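For reference, the full rename map from the bullets above, expressed as a small lookup table (both legacy spellings map to the new key; `old_webhook_section` is a hypothetical stand-in for an existing webhook config section):

```python
# The webhook key renames listed above, as a plain mapping.
WEBHOOK_RENAMES = {
    "webhooksell": "exit",
    "webhookexit": "exit",
    "webhooksellfill": "exit_fill",
    "webhookexitfill": "exit_fill",
    "webhooksellcancel": "exit_cancel",
    "webhookexitcancel": "exit_cancel",
}

old_webhook_section = {"webhooksell": {"value1": "Exiting {pair}"}}
# Migrate legacy keys to the new names, leaving unknown keys untouched.
new_webhook_section = {WEBHOOK_RENAMES.get(k, k): v
                       for k, v in old_webhook_section.items()}
print(new_webhook_section)  # {'exit': {'value1': 'Exiting {pair}'}}
```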
@@ -236,161 +236,3 @@ If you want to predict multiple targets you must specify all labels in the same
 df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
 df['&s-up_or_down'] = np.where( df["close"].shift(-100) == df["close"], 'same', df['&s-up_or_down'])
 ```
-
-## PyTorch Module
-
-### Quick start
-
-The easiest way to quickly run a pytorch model is with the following command (for regression task):
-
-```bash
-freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel PyTorchMLPRegressor --strategy-path freqtrade/templates
-```
-
-!!! note "Installation/docker"
-    The PyTorch module requires large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl or PyTorch (~700mb additional space required) [y/N]?".
-    Users who prefer docker should ensure they use the docker image appended with `_freqaitorch`.
-
-### Structure
-
-#### Model
-
-You can construct your own Neural Network architecture in PyTorch by simply defining your `nn.Module` class inside your custom [`IFreqaiModel` file](#using-different-prediction-models) and then using that class in your `def train()` function. Here is an example of logistic regression model implementation using PyTorch (should be used with nn.BCELoss criterion) for classification tasks.
-
-```python
-
-class LogisticRegression(nn.Module):
-    def __init__(self, input_size: int):
-        super().__init__()
-        # Define your layers
-        self.linear = nn.Linear(input_size, 1)
-        self.activation = nn.Sigmoid()
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        # Define the forward pass
-        out = self.linear(x)
-        out = self.activation(out)
-        return out
-
-class MyCoolPyTorchClassifier(BasePyTorchClassifier):
-    """
-    This is a custom IFreqaiModel showing how a user might setup their own
-    custom Neural Network architecture for their training.
-    """
-
-    @property
-    def data_convertor(self) -> PyTorchDataConvertor:
-        return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(**kwargs)
-        config = self.freqai_info.get("model_training_parameters", {})
-        self.learning_rate: float = config.get("learning_rate", 3e-4)
-        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
-        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
-
-    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
-        """
-        User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-            labels, weights
-        :param dk: The datakitchen object for the current coin/model
-        """
-
-        class_names = self.get_class_names()
-        self.convert_label_column_to_int(data_dictionary, dk, class_names)
-        n_features = data_dictionary["train_features"].shape[-1]
-        model = LogisticRegression(
-            input_size=n_features
-        )
-        model.to(self.device)
-        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
-        criterion = torch.nn.CrossEntropyLoss()
-        init_model = self.get_init_model(dk.pair)
-        trainer = PyTorchModelTrainer(
-            model=model,
-            optimizer=optimizer,
-            criterion=criterion,
-            model_meta_data={"class_names": class_names},
-            device=self.device,
-            init_model=init_model,
-            data_convertor=self.data_convertor,
-            **self.trainer_kwargs,
-        )
-        trainer.fit(data_dictionary, self.splits)
-        return trainer
-
-```
-
-#### Trainer
-
-The `PyTorchModelTrainer` performs the idiomatic PyTorch train loop:
-Define our model, loss function, and optimizer, and then move them to the appropriate device (GPU or CPU). Inside the loop, we iterate through the batches in the dataloader, move the data to the device, compute the prediction and loss, backpropagate, and update the model parameters using the optimizer.
-
-In addition, the trainer is responsible for the following:
-- saving and loading the model
-- converting the data from `pandas.DataFrame` to `torch.Tensor`.
-
-#### Integration with Freqai module
-
-Like all freqai models, PyTorch models inherit `IFreqaiModel`. `IFreqaiModel` declares three abstract methods: `train`, `fit`, and `predict`. We implement these methods in three levels of hierarchy.
-From top to bottom:
-
-1. `BasePyTorchModel` - Implements the `train` method. All `BasePyTorch*` inherit it. Responsible for general data preparation (e.g., data normalization) and calling the `fit` method. Sets `device` attribute used by children classes. Sets `model_type` attribute used by the parent class.
-2. `BasePyTorch*` - Implements the `predict` method. Here, the `*` represents a group of algorithms, such as classifiers or regressors. Responsible for data preprocessing, predicting, and postprocessing if needed.
-3. `PyTorch*Classifier` / `PyTorch*Regressor` - Implements the `fit` method. Responsible for the main train flow, where we initialize the trainer and model objects.
-
-![image](assets/freqai_pytorch-diagram.png)
-
-#### Full example
-
-Building a PyTorch regressor using MLP (multilayer perceptron) model, MSELoss criterion, and AdamW optimizer.
-
-```python
-class PyTorchMLPRegressor(BasePyTorchRegressor):
-    def __init__(self, **kwargs) -> None:
-        super().__init__(**kwargs)
-        config = self.freqai_info.get("model_training_parameters", {})
-        self.learning_rate: float = config.get("learning_rate", 3e-4)
-        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
-        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
-
-    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
-        n_features = data_dictionary["train_features"].shape[-1]
-        model = PyTorchMLPModel(
-            input_dim=n_features,
-            output_dim=1,
-            **self.model_kwargs
-        )
-        model.to(self.device)
-        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
-        criterion = torch.nn.MSELoss()
-        init_model = self.get_init_model(dk.pair)
-        trainer = PyTorchModelTrainer(
-            model=model,
-            optimizer=optimizer,
-            criterion=criterion,
-            device=self.device,
-            init_model=init_model,
-            target_tensor_type=torch.float,
-            **self.trainer_kwargs,
-        )
-        trainer.fit(data_dictionary)
-        return trainer
-```
-
-Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. The `fit` method specifies the training building blocks: model, optimizer, criterion, and trainer. We inherit both `BasePyTorchRegressor` and `BasePyTorchModel`, where the former implements the `predict` method that is suitable for our regression task, and the latter implements the train method.
-
-??? Note "Setting Class Names for Classifiers"
-    When using classifiers, the user must declare the class names (or targets) by overriding the `IFreqaiModel.class_names` attribute. This is achieved by setting `self.freqai.class_names` in the FreqAI strategy inside the `set_freqai_targets` method.
-
-    For example, if you are using a binary classifier to predict price movements as up or down, you can set the class names as follows:
-    ```python
-    def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-        self.freqai.class_names = ["down", "up"]
-        dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
-                                              dataframe["close"], 'up', 'down')
-
-        return dataframe
-    ```
-    To see a full example, you can refer to the [classifier test strategy class](https://github.com/freqtrade/freqtrade/blob/develop/tests/strategy/strats/freqai_test_classifier.py).
@@ -6,8 +6,8 @@ Low level feature engineering is performed in the user strategy within a set of
 
 | Function | Description |
 |---------------|-------------|
-| `feature_engineering_expand_all()` | This optional function will automatically expand the defined features on the config defined `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`.
+| `feature_engineering__expand_all()` | This optional function will automatically expand the defined features on the config defined `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`.
-| `feature_engineering_expand_basic()` | This optional function will automatically expand the defined features on the config defined `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`. Note: this function does *not* expand across `include_periods_candles`.
+| `feature_engineering__expand_basic()` | This optional function will automatically expand the defined features on the config defined `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`. Note: this function does *not* expand across `include_periods_candles`.
 | `feature_engineering_standard()` | This optional function will be called once with the dataframe of the base timeframe. This is the final function to be called, which means that the dataframe entering this function will contain all the features and columns from the base asset created by the other `feature_engineering_expand` functions. This function is a good place to do custom exotic feature extractions (e.g. tsfresh). This function is also a good place for any feature that should not be auto-expanded upon (e.g., day of the week).
 | `set_freqai_targets()` | Required function to set the targets for the model. All targets must be prepended with `&` to be recognized by the FreqAI internals.
 
@@ -182,11 +182,11 @@ In total, the number of features the user of the presented example strat has cre
 $= 3 * 3 * 3 * 2 * 2 = 108$.
 
 ### Gain finer control over `feature_engineering_*` functions with `metadata`
 
 All `feature_engineering_*` and `set_freqai_targets()` functions are passed a `metadata` dictionary which contains information about the `pair`, `tf` (timeframe), and `period` that FreqAI is automating for feature building. As such, a user can use `metadata` inside `feature_engineering_*` functions as criteria for blocking/reserving features for certain timeframes, periods, pairs etc.
 
-```python
+```py
 def feature_engineering_expand_all(self, dataframe, period, metadata, **kwargs):
     if metadata["tf"] == "1h":
         dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period)
@@ -46,7 +46,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
 | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
 | `shuffle_after_split` | Split the data into train and test sets, and then shuffle both sets individually. <br> **Datatype:** Boolean. <br> Default: `False`.
-| `buffer_train_data_candles` | Cut `buffer_train_data_candles` off the beginning and end of the training data *after* the indicators were populated. The main example use is when predicting maxima and minima, the argrelextrema function cannot know the maxima/minima at the edges of the timerange. To improve model accuracy, it is best to compute argrelextrema on the full timerange and then use this function to cut off the edges (buffer) by the kernel. In another case, if the targets are set to a shifted price movement, this buffer is unnecessary because the shifted candles at the end of the timerange will be NaN and FreqAI will automatically cut those off of the training dataset.<br> **Datatype:** Integer. <br> Default: `0`.
+| `buffer_train_data_candles` | Cut `buffer_train_data_candles` off the beginning and end of the training data *after* the indicators were populated. The main example use is when predicting maxima and minima, the argrelextrema function cannot know the maxima/minima at the edges of the timerange. To improve model accuracy, it is best to compute argrelextrema on the full timerange and then use this function to cut off the edges (buffer) by the kernel. In another case, if the targets are set to a shifted price movement, this buffer is unnecessary because the shifted candles at the end of the timerange will be NaN and FreqAI will automatically cut those off of the training dataset.<br> **Datatype:** Boolean. <br> Default: `False`.
 
 ### Data split parameters
 
@@ -84,28 +84,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting. <br> **Datatype:** bool. <br> Default: `False`.
 | `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
 | `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br> Default: `False`.
-| `drop_ohlc_from_features` | Do not include the normalized ohlc data in the feature set passed to the agent during training (ohlc will still be used for driving the environment in all cases) <br> **Datatype:** Boolean. <br> **Default:** `False`
-
-### PyTorch parameters
-
-#### general
-
-| Parameter | Description |
-|------------|-------------|
-| | **Model training parameters within the `freqai.model_training_parameters` sub dictionary**
-| `learning_rate` | Learning rate to be passed to the optimizer. <br> **Datatype:** float. <br> Default: `3e-4`.
-| `model_kwargs` | Parameters to be passed to the model class. <br> **Datatype:** dict. <br> Default: `{}`.
-| `trainer_kwargs` | Parameters to be passed to the trainer class. <br> **Datatype:** dict. <br> Default: `{}`.
-
-#### trainer_kwargs
-
-| Parameter | Description |
-|------------|-------------|
-| | **Model training parameters within the `freqai.model_training_parameters.model_kwargs` sub dictionary**
-| `max_iters` | The number of training iterations to run. Iteration here refers to the number of times we call self.optimizer.step(), and is used to calculate n_epochs. <br> **Datatype:** int. <br> Default: `100`.
-| `batch_size` | The size of the batches to use during training. <br> **Datatype:** int. <br> Default: `64`.
-| `max_n_eval_batches` | The maximum number of batches to use for evaluation. <br> **Datatype:** int, optional. <br> Default: `None`.
-
 ### Additional parameters
 
@@ -55,7 +55,7 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from
         dataframe["&-action"] = 0
 ```
 
-Most of the function remains the same as for typical Regressors, however, the function below shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:
+Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:
 
 ```python
 def feature_engineering_standard(self, dataframe, **kwargs):
@@ -176,11 +176,9 @@ As you begin to modify the strategy and the prediction model, you will quickly r
 
         factor = 100
 
-        pair = self.pair.replace(':', '')
-
         # you can use feature values from dataframe
         # Assumes the shifted RSI indicator has been generated in the strategy.
-        rsi_now = self.raw_features[f"%-rsi-period_10_shift-1_{pair}_"
+        rsi_now = self.raw_features[f"%-rsi-period-10_shift-1_{self.pair}_"
                                     f"{self.config['timeframe']}"].iloc[self._current_tick]
 
         # reward agent for entering trades
@@ -248,13 +246,13 @@ FreqAI also provides a built in episodic summary logger called `self.tensorboard
         """
         def calculate_reward(self, action: int) -> float:
             if not self._is_valid(action):
-                self.tensorboard_log("invalid")
+                self.tensorboard_log("is_valid")
                 return -2
 
 ```
 
 !!! Note
-    The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. `self.tensorboard_log("float_metric1", 0.23)`. In this case the metric values are not incremented.
+    The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. `self.tensorboard_log("float_metric1", 0.23)` would add 0.23 to `float_metric`. In this case you can also disable incrementing using `inc=False` parameter.
 
 ### Choosing a base environment
 
@@ -128,9 +128,6 @@ The FreqAI specific parameter `label_period_candles` defines the offset (number
 
 You can choose to adopt a continual learning scheme by setting `"continual_learning": true` in the config. By enabling `continual_learning`, after training an initial model from scratch, subsequent trainings will start from the final model state of the preceding training. This gives the new model a "memory" of the previous state. By default, this is set to `False` which means that all new models are trained from scratch, without input from previous models.
 
-???+ danger "Continual learning enforces a constant parameter space"
-    Since `continual_learning` means that the model parameter space *cannot* change between trainings, `principal_component_analysis` is automatically disabled when `continual_learning` is enabled. Hint: PCA changes the parameter space and the number of features, learn more about PCA [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis).
-
 ## Hyperopt
 
 You can hyperopt using the same command as for [typical Freqtrade hyperopt](hyperopt.md):
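The continual-learning paragraph above boils down to a single config flag. A minimal sketch of where it lives, with the JSON config shown as a Python dict:

```python
# Minimal sketch: enabling the continual-learning scheme described above.
# Per the removed admonition, PCA is automatically disabled when this is on.
config = {
    "freqai": {
        "continual_learning": True,  # next trainings start from the previous model state
    }
}
```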
@@ -71,10 +71,6 @@ pip install -r requirements-freqai.txt
 !!! Note
     Catboost will not be installed on arm devices (raspberry, Mac M1, ARM based VPS, ...), since it does not provide wheels for this platform.
 
-!!! Note "python 3.11"
-    Some dependencies (Catboost, Torch) currently don't support python 3.11. Freqtrade therefore only supports python 3.10 for these models/dependencies.
-    Tests involving these dependencies are skipped on 3.11.
-
 ### Usage with docker
 
 If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
@@ -149,7 +149,7 @@ The below example assumes a timeframe of 1 hour:
 * Locks each pair after selling for an additional 5 candles (`CooldownPeriod`), giving other pairs a chance to get filled.
 * Stops trading for 4 hours (`4 * 1h candles`) if the last 2 days (`48 * 1h candles`) had 20 trades, which caused a max-drawdown of more than 20%. (`MaxDrawdown`).
 * Stops trading if more than 4 stoploss occur for all pairs within a 1 day (`24 * 1h candles`) limit (`StoplossGuard`).
-* Locks all pairs that had 2 Trades within the last 6 hours (`6 * 1h candles`) with a combined profit ratio of below 0.02 (<2%) (`LowProfitPairs`).
+* Locks all pairs that had 4 Trades within the last 6 hours (`6 * 1h candles`) with a combined profit ratio of below 0.02 (<2%) (`LowProfitPairs`).
 * Locks all pairs for 2 candles that had a profit of below 0.01 (<1%) within the last 24h (`24 * 1h candles`), a minimum of 4 trades.
 
 ``` python
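The original docs continue that hunk with a Python protections list. A hedged sketch of such a list matching the bullets above (develop-side values): method and key names follow freqtrade's protections API; the two `stop_duration_candles` values marked "assumed" are not stated in the bullets:

```python
# Sketch of a protections setup matching the bullets above (1h timeframe).
from freqtrade.strategy import IStrategy


class ProtectedStrategy(IStrategy):
    timeframe = "1h"

    @property
    def protections(self):
        return [
            {"method": "CooldownPeriod", "stop_duration_candles": 5},
            {
                "method": "MaxDrawdown",
                "lookback_period_candles": 48,   # last 2 days
                "trade_limit": 20,
                "stop_duration_candles": 4,      # stop trading for 4 hours
                "max_allowed_drawdown": 0.2,     # 20% max-drawdown
            },
            {
                "method": "StoplossGuard",
                "lookback_period_candles": 24,   # 1 day
                "trade_limit": 4,
                "stop_duration_candles": 2,      # assumed value
            },
            {
                "method": "LowProfitPairs",
                "lookback_period_candles": 6,
                "trade_limit": 2,                # develop side: 2 trades
                "required_profit": 0.02,
                "stop_duration_candles": 60,     # assumed value
            },
            {
                "method": "LowProfitPairs",
                "lookback_period_candles": 24,
                "trade_limit": 4,
                "required_profit": 0.01,
                "stop_duration_candles": 2,      # lock for 2 candles
            },
        ]
```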
@@ -290,8 +290,10 @@ cd freqtrade
 #### Freqtrade install: Conda Environment
 
+Prepare conda-freqtrade environment, using file `environment.yml`, which exist in main freqtrade directory
+
 ```bash
-conda create --name freqtrade python=3.10
+conda env create -n freqtrade-conda -f environment.yml
 ```
 
 !!! Note "Creating Conda Environment"
@@ -300,9 +302,12 @@ conda create --name freqtrade python=3.10
 ```bash
 # choose your own packages
 conda env create -n [name of the environment] [python version] [packages]
 
+# point to file with packages
+conda env create -n [name of the environment] -f [file]
 ```
 
-#### Enter/exit freqtrade environment
+#### Enter/exit freqtrade-conda environment
 
 To check available environments, type
 
@@ -314,7 +319,7 @@ Enter installed environment
 
 ```bash
 # enter conda environment
-conda activate freqtrade
+conda activate freqtrade-conda
 
 # exit conda environment - don't do it now
 conda deactivate
@@ -324,7 +329,6 @@ Install last python dependencies with pip
 
 ```bash
 python3 -m pip install --upgrade pip
-python3 -m pip install -r requirements.txt
 python3 -m pip install -e .
 ```
 
@@ -332,7 +336,7 @@ Patch conda libta-lib (Linux only)
 
 ```bash
 # Ensure that the environment is active!
-conda activate freqtrade
+conda activate freqtrade-conda
 
 cd build_helpers
 bash install_ta-lib.sh ${CONDA_PREFIX} nosudo
@@ -351,8 +355,8 @@ conda env list
 # activate base environment
 conda activate
 
-# activate freqtrade environment
-conda activate freqtrade
+# activate freqtrade-conda environment
+conda activate freqtrade-conda
 
 #deactivate any conda environments
 conda deactivate
@ -42,14 +42,14 @@ Enable subscribing to an instance by adding the `external_message_consumer` sect
|
|||||||
| `producers` | **Required.** List of producers <br> **Datatype:** Array.
|
| `producers` | **Required.** List of producers <br> **Datatype:** Array.
|
||||||
| `producers.name` | **Required.** Name of this producer. This name must be used in calls to `get_producer_pairs()` and `get_producer_df()` if more than one producer is used.<br> **Datatype:** string
|
| `producers.name` | **Required.** Name of this producer. This name must be used in calls to `get_producer_pairs()` and `get_producer_df()` if more than one producer is used.<br> **Datatype:** string
|
||||||
| `producers.host` | **Required.** The hostname or IP address from your producer.<br> **Datatype:** string
|
| `producers.host` | **Required.** The hostname or IP address from your producer.<br> **Datatype:** string
|
||||||
| `producers.port` | **Required.** The port matching the above host.<br>*Defaults to `8080`.*<br> **Datatype:** Integer
|
| `producers.port` | **Required.** The port matching the above host.<br> **Datatype:** string
|
||||||
| `producers.secure` | **Optional.** Use ssl in websockets connection. Default False.<br> **Datatype:** string
|
| `producers.secure` | **Optional.** Use ssl in websockets connection. Default False.<br> **Datatype:** string
|
||||||
| `producers.ws_token` | **Required.** `ws_token` as configured on the producer.<br> **Datatype:** string
|
| `producers.ws_token` | **Required.** `ws_token` as configured on the producer.<br> **Datatype:** string
|
||||||
| | **Optional settings**
|
| | **Optional settings**
|
||||||
| `wait_timeout` | Timeout until we ping again if no message is received. <br>*Defaults to `300`.*<br> **Datatype:** Integer - in seconds.
|
| `wait_timeout` | Timeout until we ping again if no message is received. <br>*Defaults to `300`.*<br> **Datatype:** Integer - in seconds.
|
||||||
| `ping_timeout` | Ping timeout <br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
|
| `wait_timeout` | Ping timeout <br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
|
||||||
| `sleep_time` | Sleep time before retrying to connect.<br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
|
| `sleep_time` | Sleep time before retrying to connect.<br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
|
||||||
| `remove_entry_exit_signals` | Remove signal columns from the dataframe (set them to 0) on dataframe receipt.<br>*Defaults to `False`.*<br> **Datatype:** Boolean.
|
| `remove_entry_exit_signals` | Remove signal columns from the dataframe (set them to 0) on dataframe receipt.<br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
|
||||||
| `message_size_limit` | Size limit per message<br>*Defaults to `8`.*<br> **Datatype:** Integer - Megabytes.
|
| `message_size_limit` | Size limit per message<br>*Defaults to `8`.*<br> **Datatype:** Integer - Megabytes.
|
||||||
|
|
||||||
Instead of (or as well as) calculating indicators in `populate_indicators()` the follower instance listens on the connection to a producer instance's messages (or multiple producer instances in advanced configurations) and requests the producer's most recently analyzed dataframes for each pair in the active whitelist.
|
Instead of (or as well as) calculating indicators in `populate_indicators()` the follower instance listens on the connection to a producer instance's messages (or multiple producer instances in advanced configurations) and requests the producer's most recently analyzed dataframes for each pair in the active whitelist.
|
||||||
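Assembled from the documented keys above, a consumer-side configuration might look like the following minimal sketch (values are illustrative placeholders, written as a Python literal rather than the JSON that would live in the config file):

```python
# Minimal sketch of the `external_message_consumer` block, using only keys
# documented in the table above; host, port and token are placeholders.
external_message_consumer = {
    "producers": [
        {
            "name": "default",
            "host": "127.0.0.1",
            "port": 8080,
            "ws_token": "mysecretwstoken",
        }
    ],
    "wait_timeout": 300,   # seconds until we ping again without a message
    "ping_timeout": 10,    # seconds
    "sleep_time": 10,      # seconds before retrying the connection
    "remove_entry_exit_signals": False,
    "message_size_limit": 8,  # Megabytes
}
```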
@@ -1,6 +1,6 @@
 markdown==3.3.7
 mkdocs==1.4.2
-mkdocs-material==9.1.6
+mkdocs-material==9.0.13
 mdx_truly_sane_lists==1.3
-pymdown-extensions==9.11
+pymdown-extensions==9.9.2
 jinja2==3.1.2

@@ -9,6 +9,9 @@ This same command can also be used to update freqUI, should there be a new relea
 
 Once the bot is started in trade / dry-run mode (with `freqtrade trade`) - the UI will be available under the configured port below (usually `http://127.0.0.1:8080`).
 
+!!! info "Alpha release"
+    FreqUI is still considered an alpha release - if you encounter bugs or inconsistencies please open a [FreqUI issue](https://github.com/freqtrade/frequi/issues/new/choose).
+
 !!! Note "developers"
     Developers should not use this method, but instead use the method described in the [freqUI repository](https://github.com/freqtrade/frequi) to get the source-code of freqUI.
 

@@ -23,22 +23,10 @@ These modes can be configured with these values:
     'stoploss_on_exchange_limit_ratio': 0.99
 ```
 
-Stoploss on exchange is only supported for the following exchanges, and not all exchanges support both stop-limit and stop-market.
-The Order-type will be ignored if only one mode is available.
-
-| Exchange | stop-loss type |
-|----------|-------------|
-| Binance | limit |
-| Binance Futures | market, limit |
-| Huobi | limit |
-| kraken | market, limit |
-| Gate | limit |
-| Okx | limit |
-| Kucoin | stop-limit, stop-market|
-
-!!! Note "Tight stoploss"
-    <ins>Do not set too low/tight stoploss value when using stop loss on exchange!</ins>
-    If set to low/tight you will have greater risk of missing fill on the order and stoploss will not work.
+!!! Note
+    Stoploss on exchange is only supported for Binance (stop-loss-limit), Huobi (stop-limit), Kraken (stop-loss-market, stop-loss-limit), Gate (stop-limit), and Kucoin (stop-limit and stop-market) as of now.
+    <ins>Do not set too low/tight stoploss value if using stop loss on exchange!</ins>
+    If set to low/tight then you have greater risk of missing fill on the order and stoploss will not work.
 
 ### stoploss_on_exchange and stoploss_on_exchange_limit_ratio
 
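For context, both sides of the hunk above assume an `order_types` block along these lines (a hedged sketch: only `stoploss_on_exchange` and `stoploss_on_exchange_limit_ratio` come from the text above, the `"stoploss"` entry is an assumed companion setting):

```python
# Hypothetical order_types fragment illustrating the settings discussed above.
order_types = {
    "stoploss": "limit",           # assumed companion setting (order type used)
    "stoploss_on_exchange": True,  # place the stop order on the exchange
    # With a stop (trigger) price of 100, a ratio of 0.99 puts the
    # limit price at 100 * 0.99 = 99.
    "stoploss_on_exchange_limit_ratio": 0.99,
}
```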
@@ -51,8 +51,7 @@ During hyperopt, this runs only once at startup.
 
 ## Bot loop start
 
-A simple callback which is called once at the start of every bot throttling iteration in dry/live mode (roughly every 5
-seconds, unless configured differently) or once per candle in backtest/hyperopt mode.
+A simple callback which is called once at the start of every bot throttling iteration (roughly every 5 seconds, unless configured differently).
 This can be used to perform calculations which are pair independent (apply to all pairs), loading of external data, etc.
 
 ``` python

@@ -62,12 +61,11 @@ class AwesomeStrategy(IStrategy):
 
     # ... populate_* methods
 
-    def bot_loop_start(self, current_time: datetime, **kwargs) -> None:
+    def bot_loop_start(self, **kwargs) -> None:
         """
         Called at the start of the bot iteration (one loop).
         Might be used to perform pair-independent tasks
         (e.g. gather some remote resource for comparison)
-        :param current_time: datetime object, containing the current datetime
         :param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
         """
         if self.config['runmode'].value in ('live', 'dry_run'):
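A sketch of the kind of pair-independent work described above, written against the `**kwargs`-only signature on the added side of this hunk (the URL and the cached attribute name are illustrative, not part of freqtrade's API):

```python
import requests

from freqtrade.strategy import IStrategy


class AwesomeStrategy(IStrategy):

    def bot_loop_start(self, **kwargs) -> None:
        if self.config['runmode'].value in ('live', 'dry_run'):
            # Refresh an external, pair-independent resource once per
            # throttling iteration and cache it for later callbacks.
            self.remote_data = requests.get(
                'https://example.com/data.json', timeout=5).json()
```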
@@ -318,11 +316,11 @@ class AwesomeStrategy(IStrategy):
 
         # evaluate highest to lowest, so that highest possible stop is used
         if current_profit > 0.40:
-            return stoploss_from_open(0.25, current_profit, is_short=trade.is_short, leverage=trade.leverage)
+            return stoploss_from_open(0.25, current_profit, is_short=trade.is_short)
         elif current_profit > 0.25:
-            return stoploss_from_open(0.15, current_profit, is_short=trade.is_short, leverage=trade.leverage)
+            return stoploss_from_open(0.15, current_profit, is_short=trade.is_short)
         elif current_profit > 0.20:
-            return stoploss_from_open(0.07, current_profit, is_short=trade.is_short, leverage=trade.leverage)
+            return stoploss_from_open(0.07, current_profit, is_short=trade.is_short)
 
         # return maximum stoploss value, keeping current stoploss price unchanged
         return 1

@@ -881,7 +881,7 @@ All columns of the informative dataframe will be available on the returning data
 
 ### *stoploss_from_open()*
 
-Stoploss values returned from `custom_stoploss` must specify a percentage relative to `current_rate`, but sometimes you may want to specify a stoploss relative to the entry point instead. `stoploss_from_open()` is a helper function to calculate a stoploss value that can be returned from `custom_stoploss` which will be equivalent to the desired trade profit above the entry point.
+Stoploss values returned from `custom_stoploss` must specify a percentage relative to `current_rate`, but sometimes you may want to specify a stoploss relative to the open price instead. `stoploss_from_open()` is a helper function to calculate a stoploss value that can be returned from `custom_stoploss` which will be equivalent to the desired percentage above the open price.
 
 ??? Example "Returning a stoploss relative to the open price from the custom stoploss function"
 

@@ -889,8 +889,6 @@ Stoploss values returned from `custom_stoploss` must specify a percentage relati
 
     If we want a stop price at 7% above the open price we can call `stoploss_from_open(0.07, current_profit, False)` which will return `0.1157024793`. 11.57% below $121 is $107, which is the same as 7% above $100.
 
-    This function will consider leverage - so at 10x leverage, the actual stoploss would be 0.7% above $100 (0.7% * 10x = 7%).
-
 
     ``` python
 

@@ -909,7 +907,7 @@ Stoploss values returned from `custom_stoploss` must specify a percentage relati
 
         # once the profit has risen above 10%, keep the stoploss at 7% above the open price
         if current_profit > 0.10:
-            return stoploss_from_open(0.07, current_profit, is_short=trade.is_short, leverage=trade.leverage)
+            return stoploss_from_open(0.07, current_profit, is_short=trade.is_short)
 
         return 1
 
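The `0.1157024793` figure quoted in the example above can be verified with plain arithmetic (a standalone sketch, not freqtrade code):

```python
# Entry at $100, current rate $121 (current_profit = 0.21), desired stop
# 7% above the open price.
open_rate = 100.0
current_rate = 121.0

desired_stop = open_rate * (1 + 0.07)       # 107.0
stoploss = 1 - desired_stop / current_rate  # distance below current_rate
print(round(stoploss, 10))                  # 0.1157024793 -> 11.57% below $121 is $107
```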
@@ -956,14 +954,12 @@ In some situations it may be confusing to deal with stops relative to current ra
 
 ## Additional data (Wallets)
 
-The strategy provides access to the `wallets` object. This contains the current balances on the exchange.
+The strategy provides access to the `Wallets` object. This contains the current balances on the exchange.
 
-!!! Note "Backtesting / Hyperopt"
-    Wallets behaves differently depending on the function it's called.
-    Within `populate_*()` methods, it'll return the full wallet as configured.
-    Within [callbacks](strategy-callbacks.md), you'll get the wallet state corresponding to the actual simulated wallet at that point in the simulation process.
+!!! Note
+    Wallets is not available during backtesting / hyperopt.
 
-Please always check if `wallets` is available to avoid failures during backtesting.
+Please always check if `Wallets` is available to avoid failures during backtesting.
 
 ``` python
 if self.wallets:
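A sketch of the guarded access recommended above, placed inside a strategy callback (the stake currency and the 5% sizing rule are illustrative; `get_free()` mirrors the documented wallets object):

```python
# Belongs inside a strategy class; shown standalone for brevity.
def custom_stake_amount(self, pair, current_time, current_rate,
                        proposed_stake, min_stake, max_stake, **kwargs):
    if self.wallets:
        # Risk at most 5% of the free stake currency per trade.
        free_stake = self.wallets.get_free('USDT')
        return min(proposed_stake, free_stake * 0.05)
    return proposed_stake
```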
@@ -1040,10 +1036,11 @@ from datetime import timedelta, datetime, timezone
 
 # Within populate indicators (or populate_buy):
 if self.config['runmode'].value in ('live', 'dry_run'):
     # fetch closed trades for the last 2 days
-    trades = Trade.get_trades_proxy(
-        pair=metadata['pair'], is_open=False,
-        open_date=datetime.now(timezone.utc) - timedelta(days=2))
+    trades = Trade.get_trades([Trade.pair == metadata['pair'],
+                               Trade.open_date > datetime.utcnow() - timedelta(days=2),
+                               Trade.is_open.is_(False),
+                               ]).all()
     # Analyze the conditions you'd like to lock the pair .... will probably be different for every strategy
     sumprofit = sum(trade.close_profit for trade in trades)
     if sumprofit < 0:

@@ -152,7 +152,7 @@ You can create your own keyboard in `config.json`:
 !!! Note "Supported Commands"
     Only the following commands are allowed. Command arguments are not supported!
 
-    `/start`, `/stop`, `/status`, `/status table`, `/trades`, `/profit`, `/performance`, `/daily`, `/stats`, `/count`, `/locks`, `/balance`, `/stopentry`, `/reload_config`, `/show_config`, `/logs`, `/whitelist`, `/blacklist`, `/edge`, `/help`, `/version`, `/marketdir`
+    `/start`, `/stop`, `/status`, `/status table`, `/trades`, `/profit`, `/performance`, `/daily`, `/stats`, `/count`, `/locks`, `/balance`, `/stopentry`, `/reload_config`, `/show_config`, `/logs`, `/whitelist`, `/blacklist`, `/edge`, `/help`, `/version`
 
 ## Telegram commands
 
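A hypothetical keyboard layout built only from commands in the supported list above (written as a Python literal for brevity; in practice this sits under the `telegram` section of `config.json`):

```python
# Three-row custom keyboard using only commands from the supported list.
keyboard = [
    ["/daily", "/stats", "/balance", "/profit"],
    ["/status table", "/performance", "/count"],
    ["/reload_config", "/logs", "/whitelist"],
]
```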
@@ -179,7 +179,6 @@ official commands. You can ask at any moment for help with `/help`.
 | `/count` | Displays number of trades used and available
 | `/locks` | Show currently locked pairs.
 | `/unlock <pair or lock_id>` | Remove the lock for this pair (or for this lock id).
-| `/marketdir [long | short | even | none]` | Updates the user managed variable that represents the current market direction. If no direction is provided, the currently set direction will be displayed.
 | **Modify Trade states** |
 | `/forceexit <trade_id> | /fx <tradeid>` | Instantly exits the given trade (Ignoring `minimum_roi`).
 | `/forceexit all | /fx all` | Instantly exits all open trades (Ignoring `minimum_roi`).

@@ -243,7 +242,7 @@ Enter Tag is configurable via Strategy.
 > **Enter Tag:** Awesome Long Signal
 > **Open Rate:** `0.00007489`
 > **Current Rate:** `0.00007489`
-> **Unrealized Profit:** `12.95%`
+> **Current Profit:** `12.95%`
 > **Stoploss:** `0.00007389 (-0.02%)`
 
 ### /status table

@@ -279,7 +278,6 @@ Return a summary of your profit/loss and performance.
 > ∙ `33.095 EUR`
 >
 > **Total Trade Count:** `138`
-> **Bot started:** `2022-07-11 18:40:44`
 > **First Trade opened:** `3 days ago`
 > **Latest Trade opened:** `2 minutes ago`
 > **Avg. Duration:** `2:33:45`

@@ -293,7 +291,6 @@ The relative profit of `15.2 Σ%` is be based on the starting capital - so in th
 Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
 Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
 Max drawdown corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.
-Bot started date will refer to the date the bot was first started. For older bots, this will default to the first trade's open date.
 
 ### /forceexit <trade_id>
 

@@ -419,27 +416,3 @@ ARDR/ETH 0.366667 0.143059 -0.01
 ### /version
 
 > **Version:** `0.14.3`
-
-### /marketdir
-
-If a market direction is provided the command updates the user managed variable that represents the current market direction.
-This variable is not set to any valid market direction on bot startup and must be set by the user. The example below is for `/marketdir long`:
-
-```
-Successfully updated marketdirection from none to long.
-```
-
-If no market direction is provided the command outputs the currently set market directions. The example below is for `/marketdir`:
-
-```
-Currently set marketdirection: even
-```
-
-You can use the market direction in your strategy via `self.market_direction`.
-
-!!! Warning "Bot restarts"
-    Please note that the market direction is not persisted, and will be reset after a bot restart/reload.
-
-!!! Danger "Backtesting"
-    As this value/variable is intended to be changed manually in dry/live trading.
-    Strategies using `market_direction` will probably not produce reliable, reproducible results (changes to this variable will not be reflected for backtesting). Use at your own risk.
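A sketch of how the removed `self.market_direction` attribute could gate entries in a strategy; `MarketDirection` comes from the removed enum shown later in this diff, while the RSI condition is purely illustrative:

```python
from pandas import DataFrame

from freqtrade.enums import MarketDirection
from freqtrade.strategy import IStrategy


class DirectionAwareStrategy(IStrategy):

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Only emit long signals while the operator has run `/marketdir long`.
        if self.market_direction == MarketDirection.LONG:
            dataframe.loc[dataframe['rsi'] < 30, 'enter_long'] = 1
        return dataframe
```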
@@ -955,47 +955,3 @@ Print trades with id 2 and 3 as json
 ``` bash
 freqtrade show-trades --db-url sqlite:///tradesv3.sqlite --trade-ids 2 3 --print-json
 ```
-
-### Strategy-Updater
-
-Updates listed strategies or all strategies within the strategies folder to be v3 compliant.
-If the command runs without --strategy-list then all strategies inside the strategies folder will be converted.
-Your original strategy will remain available in the `user_data/strategies_orig_updater/` directory.
-
-!!! Warning "Conversion results"
-    Strategy updater will work on a "best effort" approach. Please do your due diligence and verify the results of the conversion.
-    We also recommend to run a python formatter (e.g. `black`) to format results in a sane manner.
-
-```
-usage: freqtrade strategy-updater [-h] [-v] [--logfile FILE] [-V] [-c PATH]
-                                  [-d PATH] [--userdir PATH]
-                                  [--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
-
-options:
-  -h, --help            show this help message and exit
-  --strategy-list STRATEGY_LIST [STRATEGY_LIST ...]
-                        Provide a space-separated list of strategies to
-                        backtest. Please note that timeframe needs to be set
-                        either in config or via command line. When using this
-                        together with `--export trades`, the strategy-name is
-                        injected into the filename (so `backtest-data.json`
-                        becomes `backtest-data-SampleStrategy.json`
-
-Common arguments:
-  -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
-  --logfile FILE, --log-file FILE
-                        Log to the file specified. Special values are:
-                        'syslog', 'journald'. See the documentation for more
-                        details.
-  -V, --version         show program's version number and exit
-  -c PATH, --config PATH
-                        Specify configuration file (default:
-                        `userdir/config.json` or `config.json` whichever
-                        exists). Multiple --config options may be used. Can be
-                        set to `-` to read config from stdin.
-  -d PATH, --datadir PATH, --data-dir PATH
-                        Path to directory with historical backtesting data.
-  --userdir PATH, --user-data-dir PATH
-                        Path to userdata directory.
-
-```

@@ -26,7 +26,7 @@ Install ta-lib according to the [ta-lib documentation](https://github.com/mrjbq7
 
 As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), there is also a repository of unofficial pre-compiled windows Wheels [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib), which need to be downloaded and installed using `pip install TA_Lib-0.4.25-cp38-cp38-win_amd64.whl` (make sure to use the version matching your python version).
 
-Freqtrade provides these dependencies for the latest 3 Python versions (3.8, 3.9, 3.10 and 3.11) and for 64bit Windows.
+Freqtrade provides these dependencies for the latest 3 Python versions (3.8, 3.9 and 3.10) and for 64bit Windows.
 Other versions must be downloaded from the above link.
 
 ``` powershell

@@ -0,0 +1,74 @@
+name: freqtrade
+channels:
+  - conda-forge
+#  - defaults
+dependencies:
+# 1/4 req main
+  - python>=3.8,<=3.10
+  - numpy
+  - pandas
+  - pip
+
+  - py-find-1st
+  - aiohttp
+  - SQLAlchemy
+  - python-telegram-bot<20.0.0
+  - arrow
+  - cachetools
+  - requests
+  - urllib3
+  - jsonschema
+  - TA-Lib
+  - tabulate
+  - jinja2
+  - blosc
+  - sdnotify
+  - fastapi
+  - uvicorn
+  - pyjwt
+  - aiofiles
+  - psutil
+  - colorama
+  - questionary
+  - prompt-toolkit
+  - schedule
+  - python-dateutil
+  - joblib
+  - pyarrow
+
+
+# ============================
+# 2/4 req dev
+
+  - coveralls
+  - mypy
+  - pytest
+  - pytest-asyncio
+  - pytest-cov
+  - pytest-mock
+  - isort
+  - nbconvert
+
+# ============================
+# 3/4 req hyperopt
+
+  - scipy
+  - scikit-learn<1.2.0
+  - filelock
+  - scikit-optimize
+  - progressbar2
+# ============================
+# 4/4 req plot
+
+  - plotly
+  - jupyter
+
+  - pip:
+    - pycoingecko
+    # - py_find_1st
+    - tables
+    - pytest-random-order
+    - ccxt
+    - ruff
+    - -e .
+# - python-rapidjso
@@ -1,5 +1,5 @@
 """ Freqtrade bot """
-__version__ = '2023.4.dev'
+__version__ = '2023.3.dev'
 
 if 'dev' in __version__:
     from pathlib import Path

@@ -22,6 +22,5 @@ from freqtrade.commands.optimize_commands import (start_backtesting, start_backt
                                                    start_edge, start_hyperopt)
 from freqtrade.commands.pairlist_commands import start_test_pairlist
 from freqtrade.commands.plot_commands import start_plot_dataframe, start_plot_profit
-from freqtrade.commands.strategy_utils_commands import start_strategy_update
 from freqtrade.commands.trade_commands import start_trading
 from freqtrade.commands.webserver_commands import start_webserver

@@ -40,8 +40,8 @@ def setup_analyze_configuration(args: Dict[str, Any], method: RunMode) -> Dict[s
 
     if (not Path(signals_file).exists()):
         raise OperationalException(
-            f"Cannot find latest backtest signals file: {signals_file}."
-            "Run backtesting with `--export signals`."
+            (f"Cannot find latest backtest signals file: {signals_file}."
+             "Run backtesting with `--export signals`.")
         )
 
     return config

@@ -111,13 +111,10 @@ ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason
 NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
                     "list-markets", "list-pairs", "list-strategies", "list-freqaimodels",
                     "list-data", "hyperopt-list", "hyperopt-show", "backtest-filter",
-                    "plot-dataframe", "plot-profit", "show-trades", "trades-to-ohlcv",
-                    "strategy-updater"]
+                    "plot-dataframe", "plot-profit", "show-trades", "trades-to-ohlcv"]
 
 NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-strategy"]
 
-ARGS_STRATEGY_UTILS = ["strategy_list", "strategy_path", "recursive_strategy_search"]
 
 
 class Arguments:
     """

@@ -201,8 +198,8 @@ class Arguments:
                                         start_list_freqAI_models, start_list_markets,
                                         start_list_strategies, start_list_timeframes,
                                         start_new_config, start_new_strategy, start_plot_dataframe,
-                                        start_plot_profit, start_show_trades, start_strategy_update,
-                                        start_test_pairlist, start_trading, start_webserver)
+                                        start_plot_profit, start_show_trades, start_test_pairlist,
+                                        start_trading, start_webserver)
 
         subparsers = self.parser.add_subparsers(dest='command',
                                                 # Use custom message when no subhandler is added

@@ -443,11 +440,3 @@ class Arguments:
                                                parents=[_common_parser])
         webserver_cmd.set_defaults(func=start_webserver)
         self._build_args(optionlist=ARGS_WEBSERVER, parser=webserver_cmd)
-
-        # Add strategy_updater subcommand
-        strategy_updater_cmd = subparsers.add_parser('strategy-updater',
-                                                     help='updates outdated strategy'
-                                                     'files to the current version',
-                                                     parents=[_common_parser])
-        strategy_updater_cmd.set_defaults(func=start_strategy_update)
-        self._build_args(optionlist=ARGS_STRATEGY_UTILS, parser=strategy_updater_cmd)

@@ -204,14 +204,11 @@ def start_list_data(args: Dict[str, Any]) -> None:
             pair, timeframe, candle_type,
             *dhc.ohlcv_data_min_max(pair, timeframe, candle_type)
         ) for pair, timeframe, candle_type in paircombs]
 
         print(tabulate([
             (pair, timeframe, candle_type,
              start.strftime(DATETIME_PRINT_FORMAT),
              end.strftime(DATETIME_PRINT_FORMAT))
-            for pair, timeframe, candle_type, start, end in sorted(
-                paircombs1,
-                key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]))
+            for pair, timeframe, candle_type, start, end in paircombs1
         ],
             headers=("Pair", "Timeframe", "Type", 'From', 'To'),
             tablefmt='psql', stralign='right'))

@@ -1,7 +1,7 @@
 import logging
 from typing import Any, Dict
 
-from sqlalchemy import func, select
+from sqlalchemy import func
 
 from freqtrade.configuration.config_setup import setup_utils_configuration
 from freqtrade.enums import RunMode

@@ -20,7 +20,7 @@ def start_convert_db(args: Dict[str, Any]) -> None:
     config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
 
     init_db(config['db_url'])
-    session_target = Trade.session
+    session_target = Trade._session
     init_db(config['db_url_from'])
     logger.info("Starting db migration.")
 

@@ -36,16 +36,16 @@ def start_convert_db(args: Dict[str, Any]) -> None:
 
     session_target.commit()
 
-    for pairlock in PairLock.get_all_locks():
+    for pairlock in PairLock.query:
         pairlock_count += 1
         make_transient(pairlock)
         session_target.add(pairlock)
     session_target.commit()
 
     # Update sequences
-    max_trade_id = session_target.scalar(select(func.max(Trade.id)))
-    max_order_id = session_target.scalar(select(func.max(Order.id)))
-    max_pairlock_id = session_target.scalar(select(func.max(PairLock.id)))
+    max_trade_id = session_target.query(func.max(Trade.id)).scalar()
+    max_order_id = session_target.query(func.max(Order.id)).scalar()
+    max_pairlock_id = session_target.query(func.max(PairLock.id)).scalar()
 
     set_sequence_ids(session_target.get_bind(),
                      trade_id=max_trade_id,

@@ -1,55 +0,0 @@
-import logging
-import sys
-import time
-from pathlib import Path
-from typing import Any, Dict
-
-from freqtrade.configuration import setup_utils_configuration
-from freqtrade.enums import RunMode
-from freqtrade.resolvers import StrategyResolver
-from freqtrade.strategy.strategyupdater import StrategyUpdater
-
-
-logger = logging.getLogger(__name__)
-
-
-def start_strategy_update(args: Dict[str, Any]) -> None:
-    """
-    Start the strategy updating script
-    :param args: Cli args from Arguments()
-    :return: None
-    """
-
-    if sys.version_info == (3, 8):  # pragma: no cover
-        sys.exit("Freqtrade strategy updater requires Python version >= 3.9")
-
-    config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
-
-    strategy_objs = StrategyResolver.search_all_objects(
-        config, enum_failed=False, recursive=config.get('recursive_strategy_search', False))
-
-    filtered_strategy_objs = []
-    if args['strategy_list']:
-        filtered_strategy_objs = [
-            strategy_obj for strategy_obj in strategy_objs
-            if strategy_obj['name'] in args['strategy_list']
-        ]
-
-    else:
-        # Use all available entries.
-        filtered_strategy_objs = strategy_objs
-
-    processed_locations = set()
-    for strategy_obj in filtered_strategy_objs:
-        if strategy_obj['location'] not in processed_locations:
-            processed_locations.add(strategy_obj['location'])
-            start_conversion(strategy_obj, config)
-
-
-def start_conversion(strategy_obj, config):
-    print(f"Conversion of {Path(strategy_obj['location']).name} started.")
-    instance_strategy_updater = StrategyUpdater()
-    start = time.perf_counter()
-    instance_strategy_updater.start(config, strategy_obj)
-    elapsed = time.perf_counter() - start
-    print(f"Conversion of {Path(strategy_obj['location']).name} took {elapsed:.1f} seconds.")
@@ -27,7 +27,10 @@ def _extend_validator(validator_class):
             if 'default' in subschema:
                 instance.setdefault(prop, subschema['default'])
 
-        yield from validate_properties(validator, properties, instance, schema)
+        for error in validate_properties(
+            validator, properties, instance, schema,
+        ):
+            yield error
 
     return validators.extend(
         validator_class, {'properties': set_defaults}
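The `set_defaults` hook above follows jsonschema's documented recipe for filling schema defaults during validation; a hedged usage sketch (the schema and instance below are made up):

```python
from jsonschema import Draft4Validator

# `_extend_validator` is the function from the hunk above.
ValidatorWithDefaults = _extend_validator(Draft4Validator)

schema = {"properties": {"retries": {"type": "integer", "default": 3}}}
config = {}
ValidatorWithDefaults(schema).validate(config)
print(config)  # {'retries': 3} - the default was filled in as a side effect
```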
@@ -58,7 +58,7 @@ def load_config_file(path: str) -> Dict[str, Any]:
     """
     try:
         # Read config from stdin if requested in the options
-        with Path(path).open() if path != '-' else sys.stdin as file:
+        with open(path) if path != '-' else sys.stdin as file:
             config = rapidjson.load(file, parse_mode=CONFIG_PARSE_MODE)
     except FileNotFoundError:
         raise OperationalException(

@@ -116,7 +116,7 @@ class TimeRange:
         :param text: value from --timerange
         :return: Start and End range period
         """
-        if not text:
+        if text is None:
             return TimeRange(None, None, 0, 0)
         syntax = [(r'^-(\d{8})$', (None, 'date')),
                   (r'^(\d{8})-$', ('date', None)),
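The two guards in the hunk above differ for empty strings (a standalone sketch):

```python
for text in (None, ""):
    print(repr(text),
          "| `not text` ->", not text,          # catches None and ""
          "| `text is None` ->", text is None)  # catches only None
```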
@@ -36,10 +36,9 @@ AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', '
                        'AgeFilter', 'OffsetFilter', 'PerformanceFilter',
                        'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',
                        'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter']
-AVAILABLE_PROTECTIONS = ['CooldownPeriod',
-                         'LowProfitPairs', 'MaxDrawdown', 'StoplossGuard']
-AVAILABLE_DATAHANDLERS_TRADES = ['json', 'jsongz', 'hdf5', 'feather']
-AVAILABLE_DATAHANDLERS = AVAILABLE_DATAHANDLERS_TRADES + ['parquet']
+AVAILABLE_PROTECTIONS = ['CooldownPeriod', 'LowProfitPairs', 'MaxDrawdown', 'StoplossGuard']
+AVAILABLE_DATAHANDLERS_TRADES = ['json', 'jsongz', 'hdf5']
+AVAILABLE_DATAHANDLERS = AVAILABLE_DATAHANDLERS_TRADES + ['feather', 'parquet']
 BACKTEST_BREAKDOWNS = ['day', 'week', 'month']
 BACKTEST_CACHE_AGE = ['none', 'day', 'week', 'month']
 BACKTEST_CACHE_DEFAULT = 'day'

@@ -64,7 +63,6 @@ USERPATH_FREQAIMODELS = 'freqaimodels'
 TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']
 WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']
 FULL_DATAFRAME_THRESHOLD = 100
-CUSTOM_TAG_MAX_LENGTH = 255
 
 ENV_VAR_PREFIX = 'FREQTRADE__'
 

@@ -590,7 +588,6 @@ CONF_SCHEMA = {
         "rl_config": {
             "type": "object",
             "properties": {
-                "drop_ohlc_from_features": {"type": "boolean", "default": False},
                 "train_cycles": {"type": "integer"},
                 "max_trade_duration_candles": {"type": "integer"},
                 "add_state_info": {"type": "boolean", "default": False},

@@ -599,7 +596,7 @@ CONF_SCHEMA = {
                 "model_type": {"type": "string", "default": "PPO"},
                 "policy_type": {"type": "string", "default": "MlpPolicy"},
                 "net_arch": {"type": "array", "default": [128, 128]},
-                "randomize_starting_position": {"type": "boolean", "default": False},
+                "randomize_startinng_position": {"type": "boolean", "default": False},
                 "model_reward_parameters": {
                     "type": "object",
                     "properties": {
@@ -246,8 +246,14 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
     """
     Compatibility support for older backtest data.
     """
-    df['open_date'] = pd.to_datetime(df['open_date'], utc=True)
-    df['close_date'] = pd.to_datetime(df['close_date'], utc=True)
+    df['open_date'] = pd.to_datetime(df['open_date'],
+                                     utc=True,
+                                     infer_datetime_format=True
+                                     )
+    df['close_date'] = pd.to_datetime(df['close_date'],
+                                      utc=True,
+                                      infer_datetime_format=True
+                                      )
     # Compatibility support for pre short Columns
     if 'is_short' not in df.columns:
         df['is_short'] = False

@@ -340,7 +346,7 @@ def evaluate_result_multi(results: pd.DataFrame, timeframe: str,
     return df_final[df_final['open_trades'] > max_open_trades]
 
 
-def trade_list_to_dataframe(trades: Union[List[Trade], List[LocalTrade]]) -> pd.DataFrame:
+def trade_list_to_dataframe(trades: List[LocalTrade]) -> pd.DataFrame:
     """
     Convert list of Trade objects to pandas Dataframe
     :param trades: List of trade objects

@@ -367,7 +373,7 @@ def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataF
     filters = []
     if strategy:
         filters.append(Trade.strategy == strategy)
-    trades = trade_list_to_dataframe(list(Trade.get_trades(filters).all()))
+    trades = trade_list_to_dataframe(Trade.get_trades(filters).all())
 
     return trades
 

@@ -34,7 +34,7 @@ def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *,
     cols = DEFAULT_DATAFRAME_COLUMNS
     df = DataFrame(ohlcv, columns=cols)
 
-    df['date'] = to_datetime(df['date'], unit='ms', utc=True)
+    df['date'] = to_datetime(df['date'], unit='ms', utc=True, infer_datetime_format=True)
 
     # Some exchanges return int values for Volume and even for OHLC.
     # Convert them since TA-LIB indicators used in the strategy assume floats

@@ -21,7 +21,6 @@ from freqtrade.exchange import Exchange, timeframe_to_seconds
 from freqtrade.exchange.types import OrderBook
 from freqtrade.misc import append_candles_to_dataframe
 from freqtrade.rpc import RPCManager
-from freqtrade.rpc.rpc_types import RPCAnalyzedDFMsg
 from freqtrade.util import PeriodicCache
 
 

@@ -119,7 +118,8 @@ class DataProvider:
         :param new_candle: This is a new candle
         """
         if self.__rpc:
-            msg: RPCAnalyzedDFMsg = {
+            self.__rpc.send_msg(
+                {
                 'type': RPCMessageType.ANALYZED_DF,
                 'data': {
                     'key': pair_key,

@@ -127,7 +127,7 @@ class DataProvider:
                     'la': datetime.now(timezone.utc)
                 }
             }
-            self.__rpc.send_msg(msg)
+            )
             if new_candle:
                 self.__rpc.send_msg({
                     'type': RPCMessageType.NEW_CANDLE,

@@ -24,9 +24,9 @@ def _load_signal_candles(backtest_dir: Path):
         scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_signals.pkl")
 
     try:
-        with scpf.open("rb") as scp:
-            signal_candles = joblib.load(scp)
-            logger.info(f"Loaded signal candles: {str(scpf)}")
+        scp = open(scpf, "rb")
+        signal_candles = joblib.load(scp)
+        logger.info(f"Loaded signal candles: {str(scpf)}")
     except Exception as e:
         logger.error("Cannot load signal candles from pickled results: ", e)
 

@@ -4,7 +4,7 @@ from typing import Optional
 from pandas import DataFrame, read_feather, to_datetime
 
 from freqtrade.configuration import TimeRange
-from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, TradeList
+from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, TradeList
 from freqtrade.enums import CandleType
 
 from .idatahandler import IDataHandler
@@ -63,7 +63,10 @@ class FeatherDataHandler(IDataHandler):
         pairdata.columns = self._columns
         pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
                                           'low': 'float', 'close': 'float', 'volume': 'float'})
-        pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
+        pairdata['date'] = to_datetime(pairdata['date'],
+                                       unit='ms',
+                                       utc=True,
+                                       infer_datetime_format=True)
         return pairdata
 
     def ohlcv_append(

@@ -89,11 +92,12 @@ class FeatherDataHandler(IDataHandler):
         :param data: List of Lists containing trade data,
                      column sequence as in DEFAULT_TRADES_COLUMNS
         """
-        filename = self._pair_trades_filename(self._datadir, pair)
-        self.create_dir_if_needed(filename)
+        # filename = self._pair_trades_filename(self._datadir, pair)
 
-        tradesdata = DataFrame(data, columns=DEFAULT_TRADES_COLUMNS)
-        tradesdata.to_feather(filename, compression_level=9, compression='lz4')
+        raise NotImplementedError()
+        # array = pa.array(data)
+        # array
+        # feather.write_feather(data, filename)
 
     def trades_append(self, pair: str, data: TradeList):
         """

@@ -112,13 +116,14 @@ class FeatherDataHandler(IDataHandler):
         :param timerange: Timerange to load trades for - currently not implemented
         :return: List of trades
         """
-        filename = self._pair_trades_filename(self._datadir, pair)
-        if not filename.exists():
-            return []
+        raise NotImplementedError()
+        # filename = self._pair_trades_filename(self._datadir, pair)
+        # tradesdata = misc.file_load_json(filename)
 
-        tradesdata = read_feather(filename)
+        # if not tradesdata:
+        #     return []
 
-        return tradesdata.values.tolist()
+        # return tradesdata
 
     @classmethod
     def _get_file_extension(cls):

@@ -75,7 +75,10 @@ class JsonDataHandler(IDataHandler):
             return DataFrame(columns=self._columns)
         pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
                                           'low': 'float', 'close': 'float', 'volume': 'float'})
-        pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
+        pairdata['date'] = to_datetime(pairdata['date'],
+                                       unit='ms',
+                                       utc=True,
+                                       infer_datetime_format=True)
         return pairdata
 
     def ohlcv_append(

@@ -62,7 +62,10 @@ class ParquetDataHandler(IDataHandler):
         pairdata.columns = self._columns
         pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
                                           'low': 'float', 'close': 'float', 'volume': 'float'})
-        pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
+        pairdata['date'] = to_datetime(pairdata['date'],
+                                       unit='ms',
+                                       utc=True,
+                                       infer_datetime_format=True)
         return pairdata
 
     def ohlcv_append(
@@ -5,7 +5,6 @@ from freqtrade.enums.exitchecktuple import ExitCheckTuple
 from freqtrade.enums.exittype import ExitType
 from freqtrade.enums.hyperoptstate import HyperoptState
 from freqtrade.enums.marginmode import MarginMode
-from freqtrade.enums.marketstatetype import MarketDirection
 from freqtrade.enums.ordertypevalue import OrderTypeValues
 from freqtrade.enums.pricetype import PriceType
 from freqtrade.enums.rpcmessagetype import NO_ECHO_MESSAGES, RPCMessageType, RPCRequestType

@@ -1,15 +0,0 @@
-from enum import Enum
-
-
-class MarketDirection(Enum):
-    """
-    Enum for various market directions.
-    """
-    LONG = "long"
-    SHORT = "short"
-    EVEN = "even"
-    NONE = "none"
-
-    def __str__(self):
-        # convert to string
-        return self.value

@@ -4,7 +4,6 @@ from enum import Enum
 class RPCMessageType(str, Enum):
     STATUS = 'status'
     WARNING = 'warning'
-    EXCEPTION = 'exception'
     STARTUP = 'startup'
 
     ENTRY = 'entry'

@@ -8,15 +8,15 @@ from freqtrade.exchange.bitpanda import Bitpanda
 from freqtrade.exchange.bittrex import Bittrex
 from freqtrade.exchange.bybit import Bybit
 from freqtrade.exchange.coinbasepro import Coinbasepro
-from freqtrade.exchange.exchange_utils import (ROUND_DOWN, ROUND_UP, amount_to_contract_precision,
-                                               amount_to_contracts, amount_to_precision,
-                                               available_exchanges, ccxt_exchanges,
-                                               contracts_to_amount, date_minus_candles,
-                                               is_exchange_known_ccxt, market_is_active,
-                                               price_to_precision, timeframe_to_minutes,
-                                               timeframe_to_msecs, timeframe_to_next_date,
-                                               timeframe_to_prev_date, timeframe_to_seconds,
-                                               validate_exchange, validate_exchanges)
+from freqtrade.exchange.exchange_utils import (amount_to_contract_precision, amount_to_contracts,
+                                               amount_to_precision, available_exchanges,
+                                               ccxt_exchanges, contracts_to_amount,
+                                               date_minus_candles, is_exchange_known_ccxt,
+                                               market_is_active, price_to_precision,
+                                               timeframe_to_minutes, timeframe_to_msecs,
+                                               timeframe_to_next_date, timeframe_to_prev_date,
+                                               timeframe_to_seconds, validate_exchange,
+                                               validate_exchanges)
 from freqtrade.exchange.gate import Gate
 from freqtrade.exchange.hitbtc import Hitbtc
 from freqtrade.exchange.huobi import Huobi

@@ -23,7 +23,7 @@ class Binance(Exchange):
     _ft_has: Dict = {
         "stoploss_on_exchange": True,
         "stoploss_order_types": {"limit": "stop_loss_limit"},
-        "order_time_in_force": ["GTC", "FOK", "IOC", "PO"],
+        "order_time_in_force": ['GTC', 'FOK', 'IOC'],
         "ohlcv_candle_limit": 1000,
         "trades_pagination": "id",
         "trades_pagination_arg": "fromId",

@@ -31,7 +31,6 @@ class Binance(Exchange):
     }
     _ft_has_futures: Dict = {
         "stoploss_order_types": {"limit": "stop", "market": "stop_market"},
-        "order_time_in_force": ["GTC", "FOK", "IOC"],
         "tickers_have_price": False,
         "floor_leverage": True,
         "stop_price_type_field": "workingType",

@@ -196,7 +195,7 @@ class Binance(Exchange):
             leverage_tiers_path = (
                 Path(__file__).parent / 'binance_leverage_tiers.json'
             )
-            with leverage_tiers_path.open() as json_file:
+            with open(leverage_tiers_path) as json_file:
                 return json_load(json_file)
         else:
             try:
File diff suppressed because it is too large
@ -27,10 +27,11 @@ class Bybit(Exchange):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
_ft_has: Dict = {
|
_ft_has: Dict = {
|
||||||
"ohlcv_candle_limit": 200,
|
"ohlcv_candle_limit": 1000,
|
||||||
"ohlcv_has_history": False,
|
"ohlcv_has_history": False,
|
||||||
}
|
}
|
||||||
_ft_has_futures: Dict = {
|
_ft_has_futures: Dict = {
|
||||||
|
"ohlcv_candle_limit": 200,
|
||||||
"ohlcv_has_history": True,
|
"ohlcv_has_history": True,
|
||||||
"mark_ohlcv_timeframe": "4h",
|
"mark_ohlcv_timeframe": "4h",
|
||||||
"funding_fee_timeframe": "8h",
|
"funding_fee_timeframe": "8h",
|
||||||
@ -114,7 +115,7 @@ class Bybit(Exchange):
|
|||||||
data = [[x['timestamp'], x['fundingRate'], 0, 0, 0, 0] for x in data]
|
data = [[x['timestamp'], x['fundingRate'], 0, 0, 0, 0] for x in data]
|
||||||
return data
|
return data
|
||||||
|
|
||||||
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
|
def _lev_prep(self, pair: str, leverage: float, side: BuySell):
|
||||||
if self.trading_mode != TradingMode.SPOT:
|
if self.trading_mode != TradingMode.SPOT:
|
||||||
params = {'leverage': leverage}
|
params = {'leverage': leverage}
|
||||||
self.set_margin_mode(pair, self.margin_mode, accept_fail=True, params=params)
|
self.set_margin_mode(pair, self.margin_mode, accept_fail=True, params=params)
|
||||||
|
@@ -30,14 +30,13 @@ from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFun
                                   RetryableOrderError, TemporaryError)
 from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_credentials, retrier,
                                        retrier_async)
-from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType,
-                                               amount_to_contract_precision, amount_to_contracts,
-                                               amount_to_precision, contracts_to_amount,
-                                               date_minus_candles, is_exchange_known_ccxt,
-                                               market_is_active, price_to_precision,
-                                               timeframe_to_minutes, timeframe_to_msecs,
-                                               timeframe_to_next_date, timeframe_to_prev_date,
-                                               timeframe_to_seconds)
+from freqtrade.exchange.exchange_utils import (CcxtModuleType, amount_to_contract_precision,
+                                               amount_to_contracts, amount_to_precision,
+                                               contracts_to_amount, date_minus_candles,
+                                               is_exchange_known_ccxt, market_is_active,
+                                               price_to_precision, timeframe_to_minutes,
+                                               timeframe_to_msecs, timeframe_to_next_date,
+                                               timeframe_to_prev_date, timeframe_to_seconds)
 from freqtrade.exchange.types import OHLCVResponse, OrderBook, Ticker, Tickers
 from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json,
                             safe_value_fallback2)
@@ -60,8 +59,8 @@ class Exchange:
     # or by specifying them in the configuration.
     _ft_has_default: Dict = {
         "stoploss_on_exchange": False,
-        "stop_price_param": "stopPrice",
         "order_time_in_force": ["GTC"],
+        "time_in_force_parameter": "timeInForce",
         "ohlcv_params": {},
         "ohlcv_candle_limit": 500,
         "ohlcv_has_history": True,  # Some exchanges (Kraken) don't provide history via ohlcv
@@ -70,7 +69,6 @@ class Exchange:
         # Check https://github.com/ccxt/ccxt/issues/10767 for removal of ohlcv_volume_currency
         "ohlcv_volume_currency": "base",  # "base" or "quote"
         "tickers_have_quoteVolume": True,
-        "tickers_have_bid_ask": True,  # bid / ask empty for fetch_tickers
         "tickers_have_price": True,
         "trades_pagination": "time",  # Possible are "time" or "id"
         "trades_pagination_arg": "since",
@@ -82,8 +80,6 @@ class Exchange:
         "fee_cost_in_contracts": False,  # Fee cost needs contract conversion
         "needs_trading_fees": False,  # use fetch_trading_fees to cache fees
         "order_props_in_contracts": ['amount', 'cost', 'filled', 'remaining'],
-        # Override createMarketBuyOrderRequiresPrice where ccxt has it wrong
-        "marketOrderRequiresPrice": False,
     }
     _ft_has: Dict = {}
     _ft_has_futures: Dict = {}
@@ -209,8 +205,6 @@ class Exchange:
                 and self._api_async.session):
             logger.debug("Closing async ccxt session.")
             self.loop.run_until_complete(self._api_async.close())
-        if self.loop and not self.loop.is_closed():
-            self.loop.close()

     def validate_config(self, config):
         # Check if timeframe is available
@@ -736,14 +730,12 @@ class Exchange:
         """
         return amount_to_precision(amount, self.get_precision_amount(pair), self.precisionMode)

-    def price_to_precision(self, pair: str, price: float, *, rounding_mode: int = ROUND) -> float:
+    def price_to_precision(self, pair: str, price: float) -> float:
         """
-        Returns the price rounded to the precision the Exchange accepts.
-        The default price_rounding_mode in conf is ROUND.
-        For stoploss calculations, must use ROUND_UP for longs, and ROUND_DOWN for shorts.
+        Returns the price rounded up to the precision the Exchange accepts.
+        Rounds up
         """
-        return price_to_precision(price, self.get_precision_price(pair),
-                                  self.precisionMode, rounding_mode=rounding_mode)
+        return price_to_precision(price, self.get_precision_price(pair), self.precisionMode)

     def price_get_one_pip(self, pair: str, price: float) -> float:
         """
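Note: the `rounding_mode` parameter that disappears here matters for stop prices — rounding a long's stop up (and a short's down) keeps the stop on the safe side of the tick grid. A minimal standalone sketch of tick-size rounding, independent of freqtrade's helpers (the 0.5 tick size is a made-up value):

```python
import math

def round_to_tick(price: float, tick: float, up: bool) -> float:
    # Snap price to the tick grid, rounding away from the position:
    # up=True for long stoplosses, up=False for short stoplosses.
    ticks = price / tick
    return (math.ceil(ticks) if up else math.floor(ticks)) * tick

print(round_to_tick(100.26, 0.5, up=True))   # 100.5 (long stop)
print(round_to_tick(100.26, 0.5, up=False))  # 100.0 (short stop)
```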
@@ -766,12 +758,12 @@ class Exchange:
         return self._get_stake_amount_limit(pair, price, stoploss, 'min', leverage)

     def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float:
-        max_stake_amount = self._get_stake_amount_limit(pair, price, 0.0, 'max', leverage)
+        max_stake_amount = self._get_stake_amount_limit(pair, price, 0.0, 'max')
         if max_stake_amount is None:
             # * Should never be executed
             raise OperationalException(f'{self.name}.get_max_pair_stake_amount should'
                                        'never set max_stake_amount to None')
-        return max_stake_amount
+        return max_stake_amount / leverage

     def _get_stake_amount_limit(
         self,
@@ -789,41 +781,43 @@ class Exchange:
         except KeyError:
             raise ValueError(f"Can't get market information for symbol {pair}")

-        if isMin:
-            # reserve some percent defined in config (5% default) + stoploss
-            margin_reserve: float = 1.0 + self._config.get('amount_reserve_percent',
-                                                           DEFAULT_AMOUNT_RESERVE_PERCENT)
-            stoploss_reserve = (
-                margin_reserve / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5
-            )
-            # it should not be more than 50%
-            stoploss_reserve = max(min(stoploss_reserve, 1.5), 1)
-        else:
-            margin_reserve = 1.0
-            stoploss_reserve = 1.0

         stake_limits = []
         limits = market['limits']
         if (limits['cost'][limit] is not None):
             stake_limits.append(
-                self._contracts_to_amount(pair, limits['cost'][limit]) * stoploss_reserve
+                self._contracts_to_amount(
+                    pair,
+                    limits['cost'][limit]
+                )
             )

         if (limits['amount'][limit] is not None):
             stake_limits.append(
-                self._contracts_to_amount(pair, limits['amount'][limit]) * price * margin_reserve
+                self._contracts_to_amount(
+                    pair,
+                    limits['amount'][limit] * price
+                )
             )

         if not stake_limits:
             return None if isMin else float('inf')

+        # reserve some percent defined in config (5% default) + stoploss
+        amount_reserve_percent = 1.0 + self._config.get('amount_reserve_percent',
+                                                        DEFAULT_AMOUNT_RESERVE_PERCENT)
+        amount_reserve_percent = (
+            amount_reserve_percent / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5
+        )
+        # it should not be more than 50%
+        amount_reserve_percent = max(min(amount_reserve_percent, 1.5), 1)

         # The value returned should satisfy both limits: for amount (base currency) and
         # for cost (quote, stake currency), so max() is used here.
         # See also #2575 at github.
         return self._get_stake_amount_considering_leverage(
-            max(stake_limits) if isMin else min(stake_limits),
+            max(stake_limits) * amount_reserve_percent,
             leverage or 1.0
-        )
+        ) if isMin else min(stake_limits)

     def _get_stake_amount_considering_leverage(self, stake_amount: float, leverage: float) -> float:
         """
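Note: the minimum-stake reserve on both sides of this hunk is easy to sanity-check by hand. With the default 5% reserve and a 10% stoploss the multiplier is 1.05 / (1 - 0.10) ≈ 1.167, clamped to the [1, 1.5] range. A rough sketch (input values are illustrative):

```python
def min_stake_reserve(amount_reserve_percent: float, stoploss: float) -> float:
    # 1.0 + configured reserve, inflated so the stop can fire before the
    # position drops below the exchange minimum; capped at +50%.
    reserve = 1.0 + amount_reserve_percent
    reserve = reserve / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5
    return max(min(reserve, 1.5), 1)

print(min_stake_reserve(0.05, -0.10))  # ~1.1667
print(min_stake_reserve(0.05, -0.99))  # 1.5 (capped)
```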
@@ -1024,10 +1018,10 @@ class Exchange:

     # Order handling

-    def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
+    def _lev_prep(self, pair: str, leverage: float, side: BuySell):
         if self.trading_mode != TradingMode.SPOT:
-            self.set_margin_mode(pair, self.margin_mode, accept_fail)
-            self._set_leverage(leverage, pair, accept_fail)
+            self.set_margin_mode(pair, self.margin_mode)
+            self._set_leverage(leverage, pair)

     def _get_params(
         self,
@@ -1039,18 +1033,12 @@ class Exchange:
     ) -> Dict:
         params = self._params.copy()
         if time_in_force != 'GTC' and ordertype != 'market':
-            params.update({'timeInForce': time_in_force.upper()})
+            param = self._ft_has.get('time_in_force_parameter', '')
+            params.update({param: time_in_force.upper()})
         if reduceOnly:
             params.update({'reduceOnly': True})
         return params

-    def _order_needs_price(self, ordertype: str) -> bool:
-        return (
-            ordertype != 'market'
-            or self._api.options.get("createMarketBuyOrderRequiresPrice", False)
-            or self._ft_has.get('marketOrderRequiresPrice', False)
-        )
-
     def create_order(
         self,
         *,
@@ -1073,7 +1061,8 @@ class Exchange:
         try:
             # Set the precision for amount and price(rate) as accepted by the exchange
             amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))
-            needs_price = self._order_needs_price(ordertype)
+            needs_price = (ordertype != 'market'
+                           or self._api.options.get("createMarketBuyOrderRequiresPrice", False))
             rate_for_order = self.price_to_precision(pair, rate) if needs_price else None

             if not reduceOnly:
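Note: the removed `_order_needs_price` helper encodes a simple rule — limit orders always need a price, and market orders need one only when the exchange (via the ccxt option or the `marketOrderRequiresPrice` flag) insists on it. A standalone sketch of the same decision, with made-up flag values:

```python
def order_needs_price(ordertype: str,
                      ccxt_requires_price: bool,
                      ft_requires_price: bool) -> bool:
    # Limit/stop orders always carry a price; market orders only if
    # either the ccxt option or the exchange override demands one.
    return (
        ordertype != 'market'
        or ccxt_requires_price
        or ft_requires_price
    )

print(order_needs_price('limit', False, False))   # True
print(order_needs_price('market', False, True))   # True (exchange quotes by cost)
print(order_needs_price('market', False, False))  # False
```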
@@ -1097,7 +1086,7 @@ class Exchange:
                 f'Tried to {side} amount {amount} at rate {rate}.'
                 f'Message: {e}') from e
         except ccxt.InvalidOrder as e:
-            raise InvalidOrderException(
+            raise ExchangeError(
                 f'Could not create {ordertype} {side} order on market {pair}. '
                 f'Tried to {side} amount {amount} at rate {rate}. '
                 f'Message: {e}') from e
@@ -1116,11 +1105,11 @@ class Exchange:
         """
         if not self._ft_has.get('stoploss_on_exchange'):
             raise OperationalException(f"stoploss is not implemented for {self.name}.")
-        price_param = self._ft_has['stop_price_param']
         return (
-            order.get(price_param, None) is None
-            or ((side == "sell" and stop_loss > float(order[price_param])) or
-                (side == "buy" and stop_loss < float(order[price_param])))
+            order.get('stopPrice', None) is None
+            or ((side == "sell" and stop_loss > float(order['stopPrice'])) or
+                (side == "buy" and stop_loss < float(order['stopPrice'])))
         )

     def _get_stop_order_type(self, user_order_type) -> Tuple[str, str]:
@@ -1147,21 +1136,14 @@ class Exchange:
                           "sell" else (stop_price >= limit_rate))
         # Ensure rate is less than stop price
         if bad_stop_price:
-            # This can for example happen if the stop / liquidation price is set to 0
-            # Which is possible if a market-order closes right away.
-            # The InvalidOrderException will bubble up to exit_positions, where it will be
-            # handled gracefully.
-            raise InvalidOrderException(
-                "In stoploss limit order, stop price should be more than limit price. "
-                f"Stop price: {stop_price}, Limit price: {limit_rate}, "
-                f"Limit Price pct: {limit_price_pct}"
-            )
+            raise OperationalException(
+                'In stoploss limit order, stop price should be more than limit price')
         return limit_rate

     def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict:
         params = self._params.copy()
-        # Verify if stopPrice works for your exchange, else configure stop_price_param
-        params.update({self._ft_has['stop_price_param']: stop_price})
+        # Verify if stopPrice works for your exchange!
+        params.update({'stopPrice': stop_price})
         return params

     @retrier(retries=0)
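Note: the `stop_price_param` entry being dropped here is what lets an exchange subclass rename the stop-trigger key without overriding `_get_stop_params`. A sketch of the lookup (the `'stopLossPrice'` override mirrors the OKX entry further down in this diff):

```python
ft_has = {"stop_price_param": "stopLossPrice"}  # per-exchange override

def get_stop_params(base_params: dict, stop_price: float) -> dict:
    params = base_params.copy()
    # The configured key decides under which name the trigger price reaches ccxt.
    params[ft_has.get("stop_price_param", "stopPrice")] = stop_price
    return params

print(get_stop_params({}, 101.5))  # {'stopLossPrice': 101.5}
```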
@@ -1187,12 +1169,12 @@ class Exchange:

         user_order_type = order_types.get('stoploss', 'market')
         ordertype, user_order_type = self._get_stop_order_type(user_order_type)
-        round_mode = ROUND_DOWN if side == 'buy' else ROUND_UP
-        stop_price_norm = self.price_to_precision(pair, stop_price, rounding_mode=round_mode)
+        stop_price_norm = self.price_to_precision(pair, stop_price)
         limit_rate = None
         if user_order_type == 'limit':
             limit_rate = self._get_stop_limit_rate(stop_price, order_types, side)
-            limit_rate = self.price_to_precision(pair, limit_rate, rounding_mode=round_mode)
+            limit_rate = self.price_to_precision(pair, limit_rate)

         if self._config['dry_run']:
             dry_order = self.create_dry_run_order(
@@ -1218,7 +1200,7 @@ class Exchange:

             amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))

-            self._lev_prep(pair, leverage, side, accept_fail=True)
+            self._lev_prep(pair, leverage, side)
             order = self._api.create_order(symbol=pair, type=ordertype, side=side,
                                            amount=amount, price=limit_rate, params=params)
             self._log_exchange_response('create_stoploss_order', order)
@@ -2543,6 +2525,7 @@ class Exchange:
         self,
         leverage: float,
         pair: Optional[str] = None,
+        trading_mode: Optional[TradingMode] = None,
         accept_fail: bool = False,
     ):
         """
@@ -2560,7 +2543,7 @@ class Exchange:
             self._log_exchange_response('set_leverage', res)
         except ccxt.DDoSProtection as e:
             raise DDosProtection(e) from e
-        except (ccxt.BadRequest, ccxt.InsufficientFunds) as e:
+        except ccxt.BadRequest as e:
             if not accept_fail:
                 raise TemporaryError(
                     f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e
@@ -2771,10 +2754,10 @@ class Exchange:
             raise OperationalException(
                 f"{self.name} does not support {self.margin_mode} {self.trading_mode}")

-        liquidation_price = None
+        isolated_liq = None
         if self._config['dry_run'] or not self.exchange_has("fetchPositions"):

-            liquidation_price = self.dry_run_liquidation_price(
+            isolated_liq = self.dry_run_liquidation_price(
                 pair=pair,
                 open_rate=open_rate,
                 is_short=is_short,
@@ -2789,16 +2772,16 @@ class Exchange:
             positions = self.fetch_positions(pair)
             if len(positions) > 0:
                 pos = positions[0]
-                liquidation_price = pos['liquidationPrice']
+                isolated_liq = pos['liquidationPrice']

-        if liquidation_price is not None:
-            buffer_amount = abs(open_rate - liquidation_price) * self.liquidation_buffer
-            liquidation_price_buffer = (
-                liquidation_price - buffer_amount
+        if isolated_liq is not None:
+            buffer_amount = abs(open_rate - isolated_liq) * self.liquidation_buffer
+            isolated_liq = (
+                isolated_liq - buffer_amount
                 if is_short else
-                liquidation_price + buffer_amount
+                isolated_liq + buffer_amount
             )
-            return max(liquidation_price_buffer, 0.0)
+            return isolated_liq
         else:
             return None

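Note: the liquidation buffer is worth a worked number. With `liquidation_buffer = 0.05`, an entry at 100 and a raw liquidation price of 80, a long's effective liquidation price moves to 81, so the bot exits before the exchange would. A sketch (all prices fabricated):

```python
def buffered_liquidation(open_rate: float, liq_price: float,
                         is_short: bool, buffer: float = 0.05) -> float:
    # Shift the liquidation price towards the entry by a safety margin,
    # and never report a negative price.
    amount = abs(open_rate - liq_price) * buffer
    buffered = liq_price - amount if is_short else liq_price + amount
    return max(buffered, 0.0)

print(buffered_liquidation(100.0, 80.0, is_short=False))  # 81.0
print(buffered_liquidation(100.0, 120.0, is_short=True))  # 119.0
```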
@@ -2,12 +2,11 @@
 Exchange support utils
 """
 from datetime import datetime, timedelta, timezone
-from math import ceil, floor
+from math import ceil
 from typing import Any, Dict, List, Optional, Tuple

 import ccxt
-from ccxt import (DECIMAL_PLACES, ROUND, ROUND_DOWN, ROUND_UP, SIGNIFICANT_DIGITS, TICK_SIZE,
-                  TRUNCATE, decimal_to_precision)
+from ccxt import ROUND_DOWN, ROUND_UP, TICK_SIZE, TRUNCATE, decimal_to_precision

 from freqtrade.exchange.common import BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED
 from freqtrade.util import FtPrecise
@@ -220,51 +219,35 @@ def amount_to_contract_precision(
     return amount


-def price_to_precision(
-    price: float,
-    price_precision: Optional[float],
-    precisionMode: Optional[int],
-    *,
-    rounding_mode: int = ROUND,
-) -> float:
+def price_to_precision(price: float, price_precision: Optional[float],
+                       precisionMode: Optional[int]) -> float:
     """
-    Returns the price rounded to the precision the Exchange accepts.
+    Returns the price rounded up to the precision the Exchange accepts.
     Partial Re-implementation of ccxt internal method decimal_to_precision(),
-    which does not support rounding up.
-    For stoploss calculations, must use ROUND_UP for longs, and ROUND_DOWN for shorts.
+    which does not support rounding up

     TODO: If ccxt supports ROUND_UP for decimal_to_precision(), we could remove this and
     align with amount_to_precision().
+    !!! Rounds up
     :param price: price to convert
    :param price_precision: price precision to use. Used from markets[pair]['precision']['price']
    :param precisionMode: precision mode to use. Should be used from precisionMode
                          one of ccxt's DECIMAL_PLACES, SIGNIFICANT_DIGITS, or TICK_SIZE
-    :param rounding_mode: rounding mode to use. Defaults to ROUND
    :return: price rounded up to the precision the Exchange accepts

    """
    if price_precision is not None and precisionMode is not None:
+        # price = float(decimal_to_precision(price, rounding_mode=ROUND,
+        #                                    precision=price_precision,
+        #                                    counting_mode=self.precisionMode,
+        #                                    ))
        if precisionMode == TICK_SIZE:
-            if rounding_mode == ROUND:
-                ticks = price / price_precision
-                rounded_ticks = round(ticks)
-                return rounded_ticks * price_precision
            precision = FtPrecise(price_precision)
            price_str = FtPrecise(price)
            missing = price_str % precision
            if not missing == FtPrecise("0"):
-                return round(float(str(price_str - missing + precision)), 14)
-            return price
-        elif precisionMode in (SIGNIFICANT_DIGITS, DECIMAL_PLACES):
-            ndigits = round(price_precision)
-            if rounding_mode == ROUND:
-                return round(price, ndigits)
-            ticks = price * (10**ndigits)
-            if rounding_mode == ROUND_UP:
-                return ceil(ticks) / (10**ndigits)
-            if rounding_mode == TRUNCATE:
-                return int(ticks) / (10**ndigits)
-            if rounding_mode == ROUND_DOWN:
-                return floor(ticks) / (10**ndigits)
-            raise ValueError(f"Unknown rounding_mode {rounding_mode}")
-        raise ValueError(f"Unknown precisionMode {precisionMode}")
+                price = round(float(str(price_str - missing + precision)), 14)
+        else:
+            symbol_prec = price_precision
+            big_price = price * pow(10, symbol_prec)
+            price = ceil(big_price) / pow(10, symbol_prec)
    return price

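Note: the behavioural difference between the two sides is easiest to see on a tick-size market — the older side always rounds up to the next tick, while the newer side can round to nearest. With a hypothetical 0.5 tick:

```python
from math import ceil

TICK = 0.5  # illustrative tick size

price = 100.1
# "Rounds up" behaviour (older side of this diff): next tick at or above.
up = ceil(price / TICK) * TICK        # 100.5
# ROUND behaviour (newer side, rounding_mode=ROUND): nearest tick.
nearest = round(price / TICK) * TICK  # 100.0
print(up, nearest)
```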
@@ -5,6 +5,7 @@ from typing import Any, Dict, List, Optional, Tuple

 from freqtrade.constants import BuySell
 from freqtrade.enums import MarginMode, PriceType, TradingMode
+from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import Exchange
 from freqtrade.misc import safe_value_fallback2

@@ -27,13 +28,10 @@ class Gate(Exchange):
         "order_time_in_force": ['GTC', 'IOC'],
         "stoploss_order_types": {"limit": "limit"},
         "stoploss_on_exchange": True,
-        "marketOrderRequiresPrice": True,
     }

     _ft_has_futures: Dict = {
         "needs_trading_fees": True,
-        "marketOrderRequiresPrice": False,
-        "tickers_have_bid_ask": False,
         "fee_cost_in_contracts": False,  # Set explicitly to false for clarity
         "order_props_in_contracts": ['amount', 'filled', 'remaining'],
         "stop_price_type_field": "price_type",
@@ -51,6 +49,14 @@ class Gate(Exchange):
         (TradingMode.FUTURES, MarginMode.ISOLATED)
     ]

+    def validate_ordertypes(self, order_types: Dict) -> None:
+
+        if self.trading_mode != TradingMode.FUTURES:
+            if any(v == 'market' for k, v in order_types.items()):
+                raise OperationalException(
+                    f'Exchange {self.name} does not support market orders.')
+        super().validate_stop_ordertypes(order_types)
+
     def _get_params(
         self,
         side: BuySell,
@@ -68,7 +74,8 @@ class Gate(Exchange):
         )
         if ordertype == 'market' and self.trading_mode == TradingMode.FUTURES:
             params['type'] = 'market'
-            params.update({'timeInForce': 'IOC'})
+            param = self._ft_has.get('time_in_force_parameter', '')
+            params.update({param: 'IOC'})
         return params

     def get_trades_for_order(self, order_id: str, pair: str, since: datetime,

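Note: both sides agree that Gate futures market orders are forced to IOC; they differ only in whether the parameter name is hardcoded or looked up from `_ft_has`. A sketch of the indirection the older side uses (dict values are illustrative):

```python
ft_has = {"time_in_force_parameter": "timeInForce"}  # exchange-specific key

def market_order_params(params: dict) -> dict:
    params = params.copy()
    params['type'] = 'market'
    # Force immediate-or-cancel via the configured parameter name.
    params[ft_has.get('time_in_force_parameter', '')] = 'IOC'
    return params

print(market_order_params({}))  # {'type': 'market', 'timeInForce': 'IOC'}
```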
@@ -12,7 +12,6 @@ from freqtrade.exceptions import (DDosProtection, InsufficientFundsError, Invali
                                   OperationalException, TemporaryError)
 from freqtrade.exchange import Exchange
 from freqtrade.exchange.common import retrier
-from freqtrade.exchange.exchange_utils import ROUND_DOWN, ROUND_UP
 from freqtrade.exchange.types import Tickers


@@ -110,7 +109,6 @@ class Kraken(Exchange):
         if self.trading_mode == TradingMode.FUTURES:
             params.update({'reduceOnly': True})

-        round_mode = ROUND_DOWN if side == 'buy' else ROUND_UP
         if order_types.get('stoploss', 'market') == 'limit':
             ordertype = "stop-loss-limit"
             limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99)
@@ -118,11 +116,11 @@ class Kraken(Exchange):
                 limit_rate = stop_price * limit_price_pct
             else:
                 limit_rate = stop_price * (2 - limit_price_pct)
-            params['price2'] = self.price_to_precision(pair, limit_rate, rounding_mode=round_mode)
+            params['price2'] = self.price_to_precision(pair, limit_rate)
         else:
             ordertype = "stop-loss"

-        stop_price = self.price_to_precision(pair, stop_price, rounding_mode=round_mode)
+        stop_price = self.price_to_precision(pair, stop_price)

         if self._config['dry_run']:
             dry_order = self.create_dry_run_order(
@@ -160,6 +158,7 @@ class Kraken(Exchange):
         self,
         leverage: float,
         pair: Optional[str] = None,
+        trading_mode: Optional[TradingMode] = None,
         accept_fail: bool = False,
     ):
         """

@@ -1,16 +1,14 @@
 import logging
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Dict, List, Optional, Tuple

 import ccxt

 from freqtrade.constants import BuySell
 from freqtrade.enums import CandleType, MarginMode, TradingMode
 from freqtrade.enums.pricetype import PriceType
-from freqtrade.exceptions import (DDosProtection, OperationalException, RetryableOrderError,
-                                  TemporaryError)
+from freqtrade.exceptions import DDosProtection, OperationalException, TemporaryError
 from freqtrade.exchange import Exchange, date_minus_candles
 from freqtrade.exchange.common import retrier
-from freqtrade.misc import safe_value_fallback2


 logger = logging.getLogger(__name__)
@@ -26,14 +24,11 @@ class Okx(Exchange):
         "ohlcv_candle_limit": 100,  # Warning, special case with data prior to X months
         "mark_ohlcv_timeframe": "4h",
         "funding_fee_timeframe": "8h",
-        "stoploss_order_types": {"limit": "limit"},
-        "stoploss_on_exchange": True,
-        "stop_price_param": "stopLossPrice",
     }
     _ft_has_futures: Dict = {
         "tickers_have_quoteVolume": False,
         "fee_cost_in_contracts": True,
-        "stop_price_type_field": "slTriggerPxType",
+        "stop_price_type_field": "tpTriggerPxType",
         "stop_price_type_value_mapping": {
             PriceType.LAST: "last",
             PriceType.MARK: "index",
@@ -126,9 +121,10 @@ class Okx(Exchange):
         return params

     @retrier
-    def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
+    def _lev_prep(self, pair: str, leverage: float, side: BuySell):
         if self.trading_mode != TradingMode.SPOT and self.margin_mode is not None:
             try:
+                # TODO-lev: Test me properly (check mgnMode passed)
                 res = self._api.set_leverage(
                     leverage=leverage,
                     symbol=pair,
@@ -161,61 +157,3 @@ class Okx(Exchange):

         pair_tiers = self._leverage_tiers[pair]
         return pair_tiers[-1]['maxNotional'] / leverage
-
-    def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict:
-        params = super()._get_stop_params(side, ordertype, stop_price)
-        if self.trading_mode == TradingMode.FUTURES and self.margin_mode:
-            params['tdMode'] = self.margin_mode.value
-            params['posSide'] = self._get_posSide(side, True)
-        return params
-
-    def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
-        if self._config['dry_run']:
-            return self.fetch_dry_run_order(order_id)
-
-        try:
-            params1 = {'stop': True}
-            order_reg = self._api.fetch_order(order_id, pair, params=params1)
-            self._log_exchange_response('fetch_stoploss_order', order_reg)
-            return order_reg
-        except ccxt.OrderNotFound:
-            pass
-        params2 = {'stop': True, 'ordType': 'conditional'}
-        for method in (self._api.fetch_open_orders, self._api.fetch_closed_orders,
-                       self._api.fetch_canceled_orders):
-            try:
-                orders = method(pair, params=params2)
-                orders_f = [order for order in orders if order['id'] == order_id]
-                if orders_f:
-                    order = orders_f[0]
-                    if (order['status'] == 'closed'
-                            and (real_order_id := order.get('info', {}).get('ordId')) is not None):
-                        # Once a order triggered, we fetch the regular followup order.
-                        order_reg = self.fetch_order(real_order_id, pair)
-                        self._log_exchange_response('fetch_stoploss_order1', order_reg)
-                        order_reg['id_stop'] = order_reg['id']
-                        order_reg['id'] = order_id
-                        order_reg['type'] = 'stoploss'
-                        order_reg['status_stop'] = 'triggered'
-                        return order_reg
-                    order['type'] = 'stoploss'
-                    return order
-            except ccxt.BaseError:
-                pass
-        raise RetryableOrderError(
-            f'StoplossOrder not found (pair: {pair} id: {order_id}).')
-
-    def get_order_id_conditional(self, order: Dict[str, Any]) -> str:
-        if order['type'] == 'stop':
-            return safe_value_fallback2(order, order, 'id_stop', 'id')
-        return order['id']
-
-    def cancel_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
-        params1 = {'stop': True}
-        # 'ordType': 'conditional'
-        #
-        return self.cancel_order(
-            order_id=order_id,
-            pair=pair,
-            params=params1,
-        )

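Note: the `stop_price_type_field` / `stop_price_type_value_mapping` pair turns freqtrade's generic `PriceType` into the exchange-specific trigger-price key; the two sides even disagree on whether OKX's field is the stop-loss (`slTriggerPxType`) or take-profit (`tpTriggerPxType`) variant. A simplified sketch of how such a mapping is applied:

```python
from enum import Enum

class PriceType(Enum):
    LAST = "last"
    MARK = "mark"

ft_has = {
    "stop_price_type_field": "slTriggerPxType",
    "stop_price_type_value_mapping": {PriceType.LAST: "last", PriceType.MARK: "index"},
}

def trigger_params(price_type: PriceType) -> dict:
    # Translate the generic PriceType into the exchange's own field/value.
    field = ft_has["stop_price_type_field"]
    return {field: ft_has["stop_price_type_value_mapping"][price_type]}

print(trigger_params(PriceType.MARK))  # {'slTriggerPxType': 'index'}
```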
@@ -47,7 +47,7 @@ class Base3ActionRLEnv(BaseEnvironment):
         self._update_unrealized_total_profit()
         step_reward = self.calculate_reward(action)
         self.total_reward += step_reward
-        self.tensorboard_log(self.actions._member_names_[action], category="actions")
+        self.tensorboard_log(self.actions._member_names_[action])

         trade_type = None
         if self.is_tradesignal(action):
@@ -66,7 +66,7 @@ class Base3ActionRLEnv(BaseEnvironment):
             elif action == Actions.Sell.value and not self.can_short:
                 self._update_total_profit()
                 self._position = Positions.Neutral
-                trade_type = "exit"
+                trade_type = "neutral"
                 self._last_trade_tick = None
             else:
                 print("case not defined")
@@ -74,7 +74,7 @@ class Base3ActionRLEnv(BaseEnvironment):
         if trade_type is not None:
             self.trade_history.append(
                 {'price': self.current_price(), 'index': self._current_tick,
-                 'type': trade_type, 'profit': self.get_unrealized_profit()})
+                 'type': trade_type})

         if (self._total_profit < self.max_drawdown or
                 self._total_unrealized_profit < self.max_drawdown):
@@ -48,10 +48,20 @@ class Base4ActionRLEnv(BaseEnvironment):
         self._update_unrealized_total_profit()
         step_reward = self.calculate_reward(action)
         self.total_reward += step_reward
-        self.tensorboard_log(self.actions._member_names_[action], category="actions")
+        self.tensorboard_log(self.actions._member_names_[action])

         trade_type = None
         if self.is_tradesignal(action):
+            """
+            Action: Neutral, position: Long ->  Close Long
+            Action: Neutral, position: Short -> Close Short
+
+            Action: Long, position: Neutral -> Open Long
+            Action: Long, position: Short -> Close Short and Open Long
+
+            Action: Short, position: Neutral -> Open Short
+            Action: Short, position: Long -> Close Long and Open Short
+            """

             if action == Actions.Neutral.value:
                 self._position = Positions.Neutral
@@ -59,16 +69,16 @@ class Base4ActionRLEnv(BaseEnvironment):
                 self._last_trade_tick = None
             elif action == Actions.Long_enter.value:
                 self._position = Positions.Long
-                trade_type = "enter_long"
+                trade_type = "long"
                 self._last_trade_tick = self._current_tick
             elif action == Actions.Short_enter.value:
                 self._position = Positions.Short
-                trade_type = "enter_short"
+                trade_type = "short"
                 self._last_trade_tick = self._current_tick
             elif action == Actions.Exit.value:
                 self._update_total_profit()
                 self._position = Positions.Neutral
-                trade_type = "exit"
+                trade_type = "neutral"
                 self._last_trade_tick = None
             else:
                 print("case not defined")
@@ -76,7 +86,7 @@ class Base4ActionRLEnv(BaseEnvironment):
         if trade_type is not None:
             self.trade_history.append(
                 {'price': self.current_price(), 'index': self._current_tick,
-                 'type': trade_type, 'profit': self.get_unrealized_profit()})
+                 'type': trade_type})

         if (self._total_profit < self.max_drawdown or
                 self._total_unrealized_profit < self.max_drawdown):
@@ -49,10 +49,20 @@ class Base5ActionRLEnv(BaseEnvironment):
         self._update_unrealized_total_profit()
         step_reward = self.calculate_reward(action)
         self.total_reward += step_reward
-        self.tensorboard_log(self.actions._member_names_[action], category="actions")
+        self.tensorboard_log(self.actions._member_names_[action])

         trade_type = None
         if self.is_tradesignal(action):
+            """
+            Action: Neutral, position: Long ->  Close Long
+            Action: Neutral, position: Short -> Close Short
+
+            Action: Long, position: Neutral -> Open Long
+            Action: Long, position: Short -> Close Short and Open Long
+
+            Action: Short, position: Neutral -> Open Short
+            Action: Short, position: Long -> Close Long and Open Short
+            """

             if action == Actions.Neutral.value:
                 self._position = Positions.Neutral
@@ -60,21 +70,21 @@ class Base5ActionRLEnv(BaseEnvironment):
                 self._last_trade_tick = None
             elif action == Actions.Long_enter.value:
                 self._position = Positions.Long
-                trade_type = "enter_long"
+                trade_type = "long"
                 self._last_trade_tick = self._current_tick
             elif action == Actions.Short_enter.value:
                 self._position = Positions.Short
-                trade_type = "enter_short"
+                trade_type = "short"
                 self._last_trade_tick = self._current_tick
             elif action == Actions.Long_exit.value:
                 self._update_total_profit()
                 self._position = Positions.Neutral
-                trade_type = "exit_long"
+                trade_type = "neutral"
                 self._last_trade_tick = None
             elif action == Actions.Short_exit.value:
                 self._update_total_profit()
                 self._position = Positions.Neutral
-                trade_type = "exit_short"
+                trade_type = "neutral"
                 self._last_trade_tick = None
             else:
                 print("case not defined")
@@ -82,7 +92,7 @@ class Base5ActionRLEnv(BaseEnvironment):
         if trade_type is not None:
             self.trade_history.append(
                 {'price': self.current_price(), 'index': self._current_tick,
-                 'type': trade_type, 'profit': self.get_unrealized_profit()})
+                 'type': trade_type})

         if (self._total_profit < self.max_drawdown or
                 self._total_unrealized_profit < self.max_drawdown):

@@ -137,8 +137,7 @@ class BaseEnvironment(gym.Env):
         self.np_random, seed = seeding.np_random(seed)
         return [seed]

-    def tensorboard_log(self, metric: str, value: Optional[Union[int, float]] = None,
-                        inc: Optional[bool] = None, category: str = "custom"):
+    def tensorboard_log(self, metric: str, value: Union[int, float] = 1, inc: bool = True):
         """
         Function builds the tensorboard_metrics dictionary
         to be parsed by the TensorboardCallback. This
@@ -150,24 +149,17 @@ class BaseEnvironment(gym.Env):

         def calculate_reward(self, action: int) -> float:
             if not self._is_valid(action):
-                self.tensorboard_log("invalid")
+                self.tensorboard_log("is_valid")
                 return -2

         :param metric: metric to be tracked and incremented
-        :param value: `metric` value
-        :param inc: (deprecated) sets whether the `value` is incremented or not
-        :param category: `metric` category
+        :param value: value to increment `metric` by
+        :param inc: sets whether the `value` is incremented or not
         """
-        increment = True if value is None else False
-        value = 1 if increment else value
-
-        if category not in self.tensorboard_metrics:
-            self.tensorboard_metrics[category] = {}
-
-        if not increment or metric not in self.tensorboard_metrics[category]:
-            self.tensorboard_metrics[category][metric] = value
+        if not inc or metric not in self.tensorboard_metrics:
+            self.tensorboard_metrics[metric] = value
         else:
-            self.tensorboard_metrics[category][metric] += value
+            self.tensorboard_metrics[metric] += value

     def reset_tensorboard_log(self):
         self.tensorboard_metrics = {}

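Note: the newer (removed) signature nests metrics under a category, so TensorBoard groups action counts separately from custom metrics. A sketch of what that bookkeeping produces (metric names are illustrative):

```python
metrics: dict = {}

def tb_log(metric: str, value=None, category: str = "custom") -> None:
    # value=None means "count occurrences"; otherwise store the value as-is.
    increment = value is None
    value = 1 if increment else value
    bucket = metrics.setdefault(category, {})
    if not increment or metric not in bucket:
        bucket[metric] = value
    else:
        bucket[metric] += value

tb_log("Long_enter", category="actions")
tb_log("Long_enter", category="actions")
tb_log("invalid")
print(metrics)  # {'actions': {'Long_enter': 2}, 'custom': {'invalid': 1}}
```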
@@ -114,7 +114,6 @@ class BaseReinforcementLearningModel(IFreqaiModel):

         # normalize all data based on train_dataset only
         prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk)
-
         data_dictionary = dk.normalize_data(data_dictionary)

         # data cleaning/analysis
@@ -149,8 +148,12 @@ class BaseReinforcementLearningModel(IFreqaiModel):

         env_info = self.pack_env_dict(dk.pair)

-        self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, **env_info)
-        self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, **env_info))
+        self.train_env = self.MyRLEnv(df=train_df,
+                                      prices=prices_train,
+                                      **env_info)
+        self.eval_env = Monitor(self.MyRLEnv(df=test_df,
+                                             prices=prices_test,
+                                             **env_info))
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                           render=False, eval_freq=len(train_df),
                                           best_model_save_path=str(dk.data_path))
@@ -235,9 +238,6 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         filtered_dataframe, _ = dk.filter_features(
             unfiltered_df, dk.training_features_list, training_filter=False
         )
-
-        filtered_dataframe = self.drop_ohlc_from_df(filtered_dataframe, dk)
-
         filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
         dk.data_dictionary["prediction_features"] = filtered_dataframe

@@ -285,6 +285,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         train_df = data_dictionary["train_features"]
         test_df = data_dictionary["test_features"]

+        # %-raw_volume_gen_shift-2_ETH/USDT_1h
         # price data for model training and evaluation
         tf = self.config['timeframe']
         rename_dict = {'%-raw_open': 'open', '%-raw_low': 'low',
@@ -317,24 +318,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         prices_test.rename(columns=rename_dict, inplace=True)
         prices_test.reset_index(drop=True)

-        train_df = self.drop_ohlc_from_df(train_df, dk)
-        test_df = self.drop_ohlc_from_df(test_df, dk)
-
         return prices_train, prices_test

-    def drop_ohlc_from_df(self, df: DataFrame, dk: FreqaiDataKitchen):
-        """
-        Given a dataframe, drop the ohlc data
-        """
-        drop_list = ['%-raw_open', '%-raw_low', '%-raw_high', '%-raw_close']
-
-        if self.rl_config["drop_ohlc_from_features"]:
-            df.drop(drop_list, axis=1, inplace=True)
-            feature_list = dk.training_features_list
-            dk.training_features_list = [e for e in feature_list if e not in drop_list]
-
-        return df
-
     def load_model_from_disk(self, dk: FreqaiDataKitchen) -> Any:
         """
         Can be used by user if they are trying to limit_ram_usage *and*

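Note: the removed `drop_ohlc_from_df` exists because the raw `%-raw_*` price columns are needed to build the environment's price frames but can leak trivial information into the feature set. A sketch of the same filtering with pandas (the frame and its values are fabricated; the column names follow `drop_list` above):

```python
import pandas as pd

drop_list = ['%-raw_open', '%-raw_low', '%-raw_high', '%-raw_close']

df = pd.DataFrame({'%-raw_open': [1.0], '%-raw_low': [0.9],
                   '%-raw_high': [1.1], '%-raw_close': [1.05],
                   '%-rsi': [55.0]})
features = list(df.columns)

# Drop raw OHLC from the model's features, keeping indicator columns.
df = df.drop(columns=drop_list)
features = [f for f in features if f not in drop_list]
print(features)  # ['%-rsi']
```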
@@ -13,7 +13,7 @@ class TensorboardCallback(BaseCallback):
     episodic summary reports.
     """
     def __init__(self, verbose=1, actions: Type[Enum] = BaseActions):
-        super().__init__(verbose)
+        super(TensorboardCallback, self).__init__(verbose)
         self.model: Any = None
         self.logger = None  # type: Any
         self.training_env: BaseEnvironment = None  # type: ignore
@@ -46,12 +46,14 @@ class TensorboardCallback(BaseCallback):
         local_info = self.locals["infos"][0]
         tensorboard_metrics = self.training_env.get_attr("tensorboard_metrics")[0]

-        for metric in local_info:
-            if metric not in ["episode", "terminal_observation"]:
-                self.logger.record(f"info/{metric}", local_info[metric])
+        for info in local_info:
+            if info not in ["episode", "terminal_observation"]:
+                self.logger.record(f"_info/{info}", local_info[info])

-        for category in tensorboard_metrics:
-            for metric in tensorboard_metrics[category]:
-                self.logger.record(f"{category}/{metric}", tensorboard_metrics[category][metric])
+        for info in tensorboard_metrics:
+            if info in [action.name for action in self.actions]:
+                self.logger.record(f"_actions/{info}", tensorboard_metrics[info])
+            else:
+                self.logger.record(f"_custom/{info}", tensorboard_metrics[info])

         return True

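Note: with the categorised dict from the environment, the callback's logging collapses to two nested loops; the older flat dict instead has to pattern-match metric names against the action enum. A sketch of the newer loop (the logger is stubbed with a print):

```python
tensorboard_metrics = {"actions": {"Long_enter": 3}, "custom": {"invalid": 1}}

def record(key: str, value) -> None:  # stand-in for the stable-baselines logger
    print(key, value)

for category in tensorboard_metrics:
    for metric in tensorboard_metrics[category]:
        record(f"{category}/{metric}", tensorboard_metrics[category][metric])
# actions/Long_enter 3
# custom/invalid 1
```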
@@ -1,147 +0,0 @@
-import logging
-from typing import Dict, List, Tuple
-
-import numpy as np
-import numpy.typing as npt
-import pandas as pd
-import torch
-from pandas import DataFrame
-from torch.nn import functional as F
-
-from freqtrade.exceptions import OperationalException
-from freqtrade.freqai.base_models.BasePyTorchModel import BasePyTorchModel
-from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
-
-
-logger = logging.getLogger(__name__)
-
-
-class BasePyTorchClassifier(BasePyTorchModel):
-    """
-    A PyTorch implementation of a classifier.
-    User must implement fit method
-
-    Important!
-
-    - User must declare the target class names in the strategy,
-    under IStrategy.set_freqai_targets method.
-
-    for example, in your strategy:
-    ```
-        def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-            self.freqai.class_names = ["down", "up"]
-            dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
-                                                  dataframe["close"], 'up', 'down')
-
-            return dataframe
-    """
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self.class_name_to_index = None
-        self.index_to_class_name = None
-
-    def predict(
-            self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
-    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
-        """
-        Filter the prediction features data and predict with it.
-        :param unfiltered_df: Full dataframe for the current backtest period.
-        :return:
-        :pred_df: dataframe containing the predictions
-        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
-        data (NaNs) or felt uncertain about data (PCA and DI index)
-        :raises ValueError: if 'class_names' doesn't exist in model meta_data.
-        """
-
-        class_names = self.model.model_meta_data.get("class_names", None)
-        if not class_names:
-            raise ValueError(
-                "Missing class names. "
-                "self.model.model_meta_data['class_names'] is None."
-            )
-
-        if not self.class_name_to_index:
-            self.init_class_names_to_index_mapping(class_names)
-
-        dk.find_features(unfiltered_df)
-        filtered_df, _ = dk.filter_features(
-            unfiltered_df, dk.training_features_list, training_filter=False
-        )
-        filtered_df = dk.normalize_data_from_metadata(filtered_df)
-        dk.data_dictionary["prediction_features"] = filtered_df
-        self.data_cleaning_predict(dk)
-        x = self.data_convertor.convert_x(
-            dk.data_dictionary["prediction_features"],
-            device=self.device
-        )
-        logits = self.model.model(x)
-        probs = F.softmax(logits, dim=-1)
-        predicted_classes = torch.argmax(probs, dim=-1)
-        predicted_classes_str = self.decode_class_names(predicted_classes)
-        pred_df_prob = DataFrame(probs.detach().numpy(), columns=class_names)
-        pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]])
-        pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
-        return (pred_df, dk.do_predict)
-
-    def encode_class_names(
-        self,
-        data_dictionary: Dict[str, pd.DataFrame],
-        dk: FreqaiDataKitchen,
-        class_names: List[str],
-    ):
-        """
-        encode class name, str -> int
-        assuming first column of *_labels data frame to be the target column
-        containing the class names
-        """
-
-        target_column_name = dk.label_list[0]
-        for split in self.splits:
-            label_df = data_dictionary[f"{split}_labels"]
-            self.assert_valid_class_names(label_df[target_column_name], class_names)
-            label_df[target_column_name] = list(
-                map(lambda x: self.class_name_to_index[x], label_df[target_column_name])
-            )
-
-    @staticmethod
-    def assert_valid_class_names(
-            target_column: pd.Series,
-            class_names: List[str]
-    ):
-        non_defined_labels = set(target_column) - set(class_names)
-        if len(non_defined_labels) != 0:
-            raise OperationalException(
-                f"Found non defined labels: {non_defined_labels}, ",
-                f"expecting labels: {class_names}"
-            )
-
-    def decode_class_names(self, class_ints: torch.Tensor) -> List[str]:
-        """
-        decode class name, int -> str
-        """
-
-        return list(map(lambda x: self.index_to_class_name[x.item()], class_ints))
-
-    def init_class_names_to_index_mapping(self, class_names):
-        self.class_name_to_index = {s: i for i, s in enumerate(class_names)}
-        self.index_to_class_name = {i: s for i, s in enumerate(class_names)}
-        logger.info(f"encoded class name to index: {self.class_name_to_index}")
-
-    def convert_label_column_to_int(
-        self,
-        data_dictionary: Dict[str, pd.DataFrame],
-        dk: FreqaiDataKitchen,
-        class_names: List[str]
-    ):
-        self.init_class_names_to_index_mapping(class_names)
-        self.encode_class_names(data_dictionary, dk, class_names)
-
-    def get_class_names(self) -> List[str]:
-        if not self.class_names:
-            raise ValueError(
-                "self.class_names is empty, "
-                "set self.freqai.class_names = ['class a', 'class b', 'class c'] "
-                "inside IStrategy.set_freqai_targets method."
-            )
-
-        return self.class_names

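Note: the heart of the removed classifier is the bidirectional class/index mapping used to encode labels for training and decode logits at prediction time. A minimal standalone sketch (class names and logits are fabricated):

```python
import torch
from torch.nn import functional as F

class_names = ["down", "up"]
name_to_index = {s: i for i, s in enumerate(class_names)}
index_to_name = {i: s for i, s in enumerate(class_names)}

logits = torch.tensor([[0.2, 1.3], [2.0, -1.0]])  # stand-in model output
probs = F.softmax(logits, dim=-1)                 # per-class probabilities
predicted = torch.argmax(probs, dim=-1)           # class indices
print([index_to_name[i.item()] for i in predicted])  # ['up', 'down']
```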
@@ -1,83 +0,0 @@
import logging
from abc import ABC, abstractmethod
from time import time
from typing import Any

import torch
from pandas import DataFrame

from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor


logger = logging.getLogger(__name__)


class BasePyTorchModel(IFreqaiModel, ABC):
    """
    Base class for PyTorch type models.
    User *must* inherit from this class and set fit() and predict() and
    data_convertor property.
    """

    def __init__(self, **kwargs):
        super().__init__(config=kwargs["config"])
        self.dd.model_type = "pytorch"
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        test_size = self.freqai_info.get('data_split_parameters', {}).get('test_size')
        self.splits = ["train", "test"] if test_size != 0 else ["train"]

    def train(
        self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
    ) -> Any:
        """
        Filter the training data and train a model on it. Train makes heavy use of the
        datakitchen for storing, saving, loading, and analyzing the data.
        :param unfiltered_df: Full dataframe for the current training period
        :return:
        :model: Trained model which can be used for inference (self.predict)
        """

        logger.info(f"-------------------- Starting training {pair} --------------------")

        start_time = time()

        features_filtered, labels_filtered = dk.filter_features(
            unfiltered_df,
            dk.training_features_list,
            dk.label_list,
            training_filter=True,
        )

        # split data into train/test data.
        data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
        if not self.freqai_info.get("fit_live_predictions", 0) or not self.live:
            dk.fit_labels()
        # normalize all data based on train_dataset only
        data_dictionary = dk.normalize_data(data_dictionary)

        # optional additional data cleaning/analysis
        self.data_cleaning_train(dk)

        logger.info(
            f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
        )
        logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")

        model = self.fit(data_dictionary, dk)
        end_time = time()

        logger.info(f"-------------------- Done training {pair} "
                    f"({end_time - start_time:.2f} secs) --------------------")

        return model

    @property
    @abstractmethod
    def data_convertor(self) -> PyTorchDataConvertor:
        """
        a class responsible for converting `*_features` & `*_labels` pandas dataframes
        to pytorch tensors.
        """
        raise NotImplementedError("Abstract property")
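For orientation, a hedged sketch of the minimal subclass surface this base class demands (the class name is illustrative; the convertor choice depends on the model's tensor layout):

    import torch
    from freqtrade.freqai.base_models.BasePyTorchModel import BasePyTorchModel
    from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
                                                             PyTorchDataConvertor)


    class MyPyTorchModel(BasePyTorchModel):
        # "MyPyTorchModel" is a hypothetical example, not freqtrade code

        @property
        def data_convertor(self) -> PyTorchDataConvertor:
            return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)

        def fit(self, data_dictionary, dk):
            # build model/optimizer/trainer here and return the trained object
            raise NotImplementedError

        def predict(self, unfiltered_df, dk, **kwargs):
            # filter, normalize, convert to tensors and run the model
            raise NotImplementedError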
@@ -1,49 +0,0 @@
import logging
from typing import Tuple

import numpy as np
import numpy.typing as npt
from pandas import DataFrame

from freqtrade.freqai.base_models.BasePyTorchModel import BasePyTorchModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen


logger = logging.getLogger(__name__)


class BasePyTorchRegressor(BasePyTorchModel):
    """
    A PyTorch implementation of a regressor.
    User must implement fit method
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def predict(
        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
        """
        Filter the prediction features data and predict with it.
        :param unfiltered_df: Full dataframe for the current backtest period.
        :return:
        :pred_df: dataframe containing the predictions
        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
        data (NaNs) or felt uncertain about data (PCA and DI index)
        """

        dk.find_features(unfiltered_df)
        filtered_df, _ = dk.filter_features(
            unfiltered_df, dk.training_features_list, training_filter=False
        )
        filtered_df = dk.normalize_data_from_metadata(filtered_df)
        dk.data_dictionary["prediction_features"] = filtered_df

        self.data_cleaning_predict(dk)
        x = self.data_convertor.convert_x(
            dk.data_dictionary["prediction_features"],
            device=self.device
        )
        y = self.model.model(x)
        pred_df = DataFrame(y.detach().numpy(), columns=[dk.label_list[0]])
        return (pred_df, dk.do_predict)
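One caveat worth hedging on: `y.detach().numpy()` only works for CPU tensors; if `self.device` were "cuda", the conversion would need an explicit hop first, e.g.:

    pred_array = y.detach().cpu().numpy()  # safe for both CPU and CUDA tensors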
@@ -126,7 +126,7 @@ class FreqaiDataDrawer:
         """
         exists = self.global_metadata_path.is_file()
         if exists:
-            with self.global_metadata_path.open("r") as fp:
+            with open(self.global_metadata_path, "r") as fp:
                 metatada_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
                 return metatada_dict
         return {}
@@ -139,7 +139,7 @@ class FreqaiDataDrawer:
         """
         exists = self.pair_dictionary_path.is_file()
         if exists:
-            with self.pair_dictionary_path.open("r") as fp:
+            with open(self.pair_dictionary_path, "r") as fp:
                 self.pair_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
         else:
             logger.info("Could not find existing datadrawer, starting from scratch")
@@ -152,7 +152,7 @@ class FreqaiDataDrawer:
         if self.freqai_info.get('write_metrics_to_disk', False):
             exists = self.metric_tracker_path.is_file()
             if exists:
-                with self.metric_tracker_path.open("r") as fp:
+                with open(self.metric_tracker_path, "r") as fp:
                     self.metric_tracker = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
                 logger.info("Loading existing metric tracker from disk.")
             else:
@@ -166,7 +166,7 @@ class FreqaiDataDrawer:
         exists = self.historic_predictions_path.is_file()
         if exists:
             try:
-                with self.historic_predictions_path.open("rb") as fp:
+                with open(self.historic_predictions_path, "rb") as fp:
                     self.historic_predictions = cloudpickle.load(fp)
                 logger.info(
                     f"Found existing historic predictions at {self.full_path}, but beware "
@@ -176,7 +176,7 @@ class FreqaiDataDrawer:
             except EOFError:
                 logger.warning(
                     'Historical prediction file was corrupted. Trying to load backup file.')
-                with self.historic_predictions_bkp_path.open("rb") as fp:
+                with open(self.historic_predictions_bkp_path, "rb") as fp:
                     self.historic_predictions = cloudpickle.load(fp)
                 logger.warning('FreqAI successfully loaded the backup historical predictions file.')

@@ -189,7 +189,7 @@ class FreqaiDataDrawer:
         """
         Save historic predictions pickle to disk
         """
-        with self.historic_predictions_path.open("wb") as fp:
+        with open(self.historic_predictions_path, "wb") as fp:
             cloudpickle.dump(self.historic_predictions, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)

         # create a backup
@@ -200,16 +200,16 @@ class FreqaiDataDrawer:
         Save metric tracker of all pair metrics collected.
         """
         with self.save_lock:
-            with self.metric_tracker_path.open('w') as fp:
+            with open(self.metric_tracker_path, 'w') as fp:
                 rapidjson.dump(self.metric_tracker, fp, default=self.np_encoder,
                                number_mode=rapidjson.NM_NATIVE)

-    def save_drawer_to_disk(self) -> None:
+    def save_drawer_to_disk(self):
         """
         Save data drawer full of all pair model metadata in present model folder.
         """
         with self.save_lock:
-            with self.pair_dictionary_path.open('w') as fp:
+            with open(self.pair_dictionary_path, 'w') as fp:
                 rapidjson.dump(self.pair_dict, fp, default=self.np_encoder,
                                number_mode=rapidjson.NM_NATIVE)

@@ -218,7 +218,7 @@ class FreqaiDataDrawer:
         Save global metadata json to disk
         """
         with self.save_lock:
-            with self.global_metadata_path.open('w') as fp:
+            with open(self.global_metadata_path, 'w') as fp:
                 rapidjson.dump(metadata, fp, default=self.np_encoder,
                                number_mode=rapidjson.NM_NATIVE)

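Every hunk above makes the same swap: pathlib's bound method for the builtin. The two spellings are functionally equivalent, as this standalone snippet illustrates:

    from pathlib import Path

    p = Path("metadata.json")  # illustrative path
    # Path.open() and the builtin open() accept the same mode arguments:
    # with p.open("r") as fp: ...
    # with open(p, "r") as fp: ...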
@@ -424,7 +424,7 @@ class FreqaiDataDrawer:
         dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns)
         dk.data["label_list"] = dk.label_list

-        with (save_path / f"{dk.model_filename}_metadata.json").open("w") as fp:
+        with open(save_path / f"{dk.model_filename}_metadata.json", "w") as fp:
             rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)

         return
@@ -446,7 +446,7 @@ class FreqaiDataDrawer:
             dump(model, save_path / f"{dk.model_filename}_model.joblib")
         elif self.model_type == 'keras':
             model.save(save_path / f"{dk.model_filename}_model.h5")
-        elif self.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
+        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
             model.save(save_path / f"{dk.model_filename}_model.zip")

         if dk.svm_model is not None:
@@ -457,7 +457,7 @@ class FreqaiDataDrawer:
         dk.data["training_features_list"] = dk.training_features_list
         dk.data["label_list"] = dk.label_list
         # store the metadata
-        with (save_path / f"{dk.model_filename}_metadata.json").open("w") as fp:
+        with open(save_path / f"{dk.model_filename}_metadata.json", "w") as fp:
             rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)

         # save the train data to file so we can check preds for area of applicability later
@@ -471,7 +471,7 @@ class FreqaiDataDrawer:

         if self.freqai_info["feature_parameters"].get("principal_component_analysis"):
             cloudpickle.dump(
-                dk.pca, (dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("wb")
+                dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb")
             )

         self.model_dictionary[coin] = model
@@ -491,12 +491,12 @@ class FreqaiDataDrawer:
         Load only metadata into datakitchen to increase performance during
         presaved backtesting (prediction file loading).
         """
-        with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp:
+        with open(dk.data_path / f"{dk.model_filename}_metadata.json", "r") as fp:
             dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
             dk.training_features_list = dk.data["training_features_list"]
             dk.label_list = dk.data["label_list"]

-    def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:  # noqa: C901
+    def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:
         """
         loads all data required to make a prediction on a sub-train time range
         :returns:
@@ -514,7 +514,7 @@ class FreqaiDataDrawer:
             dk.data = self.meta_data_dictionary[coin]["meta_data"]
             dk.data_dictionary["train_features"] = self.meta_data_dictionary[coin]["train_df"]
         else:
-            with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp:
+            with open(dk.data_path / f"{dk.model_filename}_metadata.json", "r") as fp:
                 dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)

             dk.data_dictionary["train_features"] = pd.read_pickle(
@@ -537,11 +537,6 @@ class FreqaiDataDrawer:
                 self.model_type, self.freqai_info['rl_config']['model_type'])
             MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type'])
             model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
-        elif self.model_type == 'pytorch':
-            import torch
-            zip = torch.load(dk.data_path / f"{dk.model_filename}_model.zip")
-            model = zip["pytrainer"]
-            model = model.load_from_checkpoint(zip)

         if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
             dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
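The develop-side branch removed here loads a PyTorch checkpoint that stores the trainer object under a "pytrainer" key (and, as an aside, shadows the builtin `zip`). A hedged sketch of that save/load convention, with illustrative paths:

    import torch

    # saving side (sketch): torch.save({"pytrainer": trainer}, "model.zip")
    checkpoint = torch.load("model.zip")  # illustrative path
    trainer = checkpoint["pytrainer"]
    trainer = trainer.load_from_checkpoint(checkpoint)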
@@ -557,7 +552,7 @@ class FreqaiDataDrawer:

         if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
             dk.pca = cloudpickle.load(
-                (dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("rb")
+                open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb")
             )

         return model
@@ -251,7 +251,7 @@ class FreqaiDataKitchen:
                 (drop_index == 0) & (drop_index_labels == 0)
             ]
             logger.info(
-                f"{self.pair}: dropped {len(unfiltered_df) - len(filtered_df)} training points"
+                f"dropped {len(unfiltered_df) - len(filtered_df)} training points"
                 f" due to NaNs in populated dataset {len(unfiltered_df)}."
             )
             if (1 - len(filtered_df) / len(unfiltered_df)) > 0.1 and self.live:
@@ -675,7 +675,7 @@ class FreqaiDataKitchen:
         ]

         logger.info(
-            f"{self.pair}: SVM tossed {len(y_pred) - kept_points.sum()}"
+            f"SVM tossed {len(y_pred) - kept_points.sum()}"
             f" test points from {len(y_pred)} total points."
         )

@@ -949,7 +949,7 @@ class FreqaiDataKitchen:

         if (len(do_predict) - do_predict.sum()) > 0:
             logger.info(
-                f"{self.pair}: DI tossed {len(do_predict) - do_predict.sum()} predictions for "
+                f"DI tossed {len(do_predict) - do_predict.sum()} predictions for "
                 "being too far from training data."
             )

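All three hunks make the same small change: develop prefixes each of these log messages with the pair being processed, while the older branch logs them bare, along the lines of (variable names illustrative):

    logger.info(f"{self.pair}: SVM tossed {num_tossed} test points from {num_total} total points.")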
@@ -1291,7 +1291,7 @@ class FreqaiDataKitchen:

         return dataframe

-    def use_strategy_to_populate_indicators(  # noqa: C901
+    def use_strategy_to_populate_indicators(
         self,
         strategy: IStrategy,
         corr_dataframes: dict = {},
@@ -1315,59 +1315,128 @@ class FreqaiDataKitchen:
         dataframe: DataFrame = dataframe containing populated indicators
         """

-        # check if the user is using the deprecated populate_any_indicators function
+        # this is a hack to check if the user is using the populate_any_indicators function
         new_version = inspect.getsource(strategy.populate_any_indicators) == (
             inspect.getsource(IStrategy.populate_any_indicators))

-        if not new_version:
-            raise OperationalException(
-                "You are using the `populate_any_indicators()` function"
-                " which was deprecated on March 1, 2023. Please refer "
-                "to the strategy migration guide to use the new "
-                "feature_engineering_* methods: \n"
-                "https://www.freqtrade.io/en/stable/strategy_migration/#freqai-strategy \n"
-                "And the feature_engineering_* documentation: \n"
-                "https://www.freqtrade.io/en/latest/freqai-feature-engineering/"
-            )
-
-        tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes")
-        pairs: List[str] = self.freqai_config["feature_parameters"].get(
-            "include_corr_pairlist", [])
-
-        for tf in tfs:
-            if tf not in base_dataframes:
-                base_dataframes[tf] = pd.DataFrame()
-            for p in pairs:
-                if p not in corr_dataframes:
-                    corr_dataframes[p] = {}
-                if tf not in corr_dataframes[p]:
-                    corr_dataframes[p][tf] = pd.DataFrame()
-
-        if not prediction_dataframe.empty:
-            dataframe = prediction_dataframe.copy()
-        else:
-            dataframe = base_dataframes[self.config["timeframe"]].copy()
-
-        corr_pairs: List[str] = self.freqai_config["feature_parameters"].get(
-            "include_corr_pairlist", [])
-        dataframe = self.populate_features(dataframe.copy(), pair, strategy,
-                                           corr_dataframes, base_dataframes)
-        metadata = {"pair": pair}
-        dataframe = strategy.feature_engineering_standard(dataframe.copy(), metadata=metadata)
-        # ensure corr pairs are always last
-        for corr_pair in corr_pairs:
-            if pair == corr_pair:
-                continue  # dont repeat anything from whitelist
-            if corr_pairs and do_corr_pairs:
-                dataframe = self.populate_features(dataframe.copy(), corr_pair, strategy,
-                                                   corr_dataframes, base_dataframes, True)
-
-        if self.live:
-            dataframe = strategy.set_freqai_targets(dataframe.copy(), metadata=metadata)
-        dataframe = self.remove_special_chars_from_feature_names(dataframe)
-
-        self.get_unique_classes_from_labels(dataframe)
-
-        if self.config.get('reduce_df_footprint', False):
-            dataframe = reduce_dataframe_footprint(dataframe)
+        if new_version:
+            tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes")
+            pairs: List[str] = self.freqai_config["feature_parameters"].get(
+                "include_corr_pairlist", [])
+
+            for tf in tfs:
+                if tf not in base_dataframes:
+                    base_dataframes[tf] = pd.DataFrame()
+                for p in pairs:
+                    if p not in corr_dataframes:
+                        corr_dataframes[p] = {}
+                    if tf not in corr_dataframes[p]:
+                        corr_dataframes[p][tf] = pd.DataFrame()
+
+            if not prediction_dataframe.empty:
+                dataframe = prediction_dataframe.copy()
+            else:
+                dataframe = base_dataframes[self.config["timeframe"]].copy()
+
+            corr_pairs: List[str] = self.freqai_config["feature_parameters"].get(
+                "include_corr_pairlist", [])
+            dataframe = self.populate_features(dataframe.copy(), pair, strategy,
+                                               corr_dataframes, base_dataframes)
+            metadata = {"pair": pair}
+            dataframe = strategy.feature_engineering_standard(dataframe.copy(), metadata=metadata)
+            # ensure corr pairs are always last
+            for corr_pair in corr_pairs:
+                if pair == corr_pair:
+                    continue  # dont repeat anything from whitelist
+                if corr_pairs and do_corr_pairs:
+                    dataframe = self.populate_features(dataframe.copy(), corr_pair, strategy,
+                                                       corr_dataframes, base_dataframes, True)
+
+            dataframe = strategy.set_freqai_targets(dataframe.copy(), metadata=metadata)
+
+            self.get_unique_classes_from_labels(dataframe)
+
+            dataframe = self.remove_special_chars_from_feature_names(dataframe)
+
+            if self.config.get('reduce_df_footprint', False):
+                dataframe = reduce_dataframe_footprint(dataframe)
+
+            return dataframe
+
+        else:
+            # the user is using the populate_any_indicators functions which is deprecated
+
+            df = self.use_strategy_to_populate_indicators_old_version(
+                strategy, corr_dataframes, base_dataframes, pair,
+                prediction_dataframe, do_corr_pairs)
+            return df
+
+    def use_strategy_to_populate_indicators_old_version(
+        self,
+        strategy: IStrategy,
+        corr_dataframes: dict = {},
+        base_dataframes: dict = {},
+        pair: str = "",
+        prediction_dataframe: DataFrame = pd.DataFrame(),
+        do_corr_pairs: bool = True,
+    ) -> DataFrame:
+        """
+        Use the user defined strategy for populating indicators during retrain
+        :param strategy: IStrategy = user defined strategy object
+        :param corr_dataframes: dict = dict containing the df pair dataframes
+                                (for user defined timeframes)
+        :param base_dataframes: dict = dict containing the current pair dataframes
+                                (for user defined timeframes)
+        :param metadata: dict = strategy furnished pair metadata
+        :return:
+        dataframe: DataFrame = dataframe containing populated indicators
+        """
+
+        # for prediction dataframe creation, we let dataprovider handle everything in the strategy
+        # so we create empty dictionaries, which allows us to pass None to
+        # `populate_any_indicators()`. Signaling we want the dp to give us the live dataframe.
+        tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes")
+        pairs: List[str] = self.freqai_config["feature_parameters"].get("include_corr_pairlist", [])
+
+        if not prediction_dataframe.empty:
+            dataframe = prediction_dataframe.copy()
+            for tf in tfs:
+                base_dataframes[tf] = None
+                for p in pairs:
+                    if p not in corr_dataframes:
+                        corr_dataframes[p] = {}
+                    corr_dataframes[p][tf] = None
+        else:
+            dataframe = base_dataframes[self.config["timeframe"]].copy()
+
+        sgi = False
+        for tf in tfs:
+            if tf == tfs[-1]:
+                sgi = True  # doing this last allows user to use all tf raw prices in labels
+            dataframe = strategy.populate_any_indicators(
+                pair,
+                dataframe.copy(),
+                tf,
+                informative=base_dataframes[tf],
+                set_generalized_indicators=sgi
+            )

+        # ensure corr pairs are always last
+        for corr_pair in pairs:
+            if pair == corr_pair:
+                continue  # dont repeat anything from whitelist
+            for tf in tfs:
+                if pairs and do_corr_pairs:
+                    dataframe = strategy.populate_any_indicators(
+                        corr_pair,
+                        dataframe.copy(),
+                        tf,
+                        informative=corr_dataframes[corr_pair][tf]
+                    )
+
+        self.get_unique_classes_from_labels(dataframe)
+
+        dataframe = self.remove_special_chars_from_feature_names(dataframe)
+
+        if self.config.get('reduce_df_footprint', False):
+            dataframe = reduce_dataframe_footprint(dataframe)

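For readers migrating a strategy across this boundary, a hedged sketch of the new-style callbacks that the develop-side error message points to (the indicator and target choices are purely illustrative):

    import talib.abstract as ta
    from freqtrade.strategy import IStrategy


    class MyFreqaiStrategy(IStrategy):
        # "MyFreqaiStrategy" is a hypothetical example strategy

        def feature_engineering_expand_all(self, dataframe, period, **kwargs):
            # columns prefixed with "%" are picked up by FreqAI as features
            dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
            return dataframe

        def set_freqai_targets(self, dataframe, **kwargs):
            # columns prefixed with "&" are treated as prediction targets
            dataframe["&-target"] = dataframe["close"].shift(-1) / dataframe["close"]
            return dataframe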
@@ -1,3 +1,4 @@
+import inspect
 import logging
 import threading
 import time
@@ -83,7 +84,6 @@ class IFreqaiModel(ABC):
         self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
         if self.ft_params.get("inlier_metric_window", 0):
             self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
-        self.class_names: List[str] = []  # used in classification subclasses
         self.pair_it = 0
         self.pair_it_train = 0
         self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
@@ -105,10 +105,8 @@ class IFreqaiModel(ABC):
         self.data_provider: Optional[DataProvider] = None
         self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
         self.can_short = True  # overridden in start() with strategy.can_short
-        self.model: Any = None
-        if self.ft_params.get('principal_component_analysis', False) and self.continual_learning:
-            self.ft_params.update({'principal_component_analysis': False})
-            logger.warning('User tried to use PCA with continual learning. Deactivating PCA.')
+        self.warned_deprecated_populate_any_indicators = False

         record_params(config, self.full_path)
@@ -140,6 +138,9 @@ class IFreqaiModel(ABC):
         self.data_provider = strategy.dp
         self.can_short = strategy.can_short

+        # check if the strategy has deprecated populate_any_indicators function
+        self.check_deprecated_populate_any_indicators(strategy)
+
         if self.live:
             self.inference_timer('start')
             self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
@@ -158,7 +159,8 @@ class IFreqaiModel(ABC):
             dk = self.start_backtesting(dataframe, metadata, self.dk, strategy)
             dataframe = dk.remove_features_from_df(dk.return_dataframe)
         else:
-            logger.info("Backtesting using historic predictions (live models)")
+            logger.info(
+                "Backtesting using historic predictions (live models)")
             dk = self.start_backtesting_from_historic_predictions(
                 dataframe, metadata, self.dk)
             dataframe = dk.return_dataframe
@@ -307,7 +309,7 @@ class IFreqaiModel(ABC):
             if check_features:
                 self.dd.load_metadata(dk)
                 dataframe_dummy_features = self.dk.use_strategy_to_populate_indicators(
-                    strategy, prediction_dataframe=dataframe.tail(1), pair=pair
+                    strategy, prediction_dataframe=dataframe.tail(1), pair=metadata["pair"]
                 )
                 dk.find_features(dataframe_dummy_features)
                 self.check_if_feature_list_matches_strategy(dk)
@@ -317,7 +319,7 @@ class IFreqaiModel(ABC):
             else:
                 if populate_indicators:
                     dataframe = self.dk.use_strategy_to_populate_indicators(
-                        strategy, prediction_dataframe=dataframe, pair=pair
+                        strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
                     )
                     populate_indicators = False

@@ -333,10 +335,6 @@ class IFreqaiModel(ABC):
             dataframe_train = dk.slice_dataframe(tr_train, dataframe_base_train)
             dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe_base_backtest)

-            dataframe_train = dk.remove_special_chars_from_feature_names(dataframe_train)
-            dataframe_backtest = dk.remove_special_chars_from_feature_names(dataframe_backtest)
-            dk.get_unique_classes_from_labels(dataframe_train)
-
             if not self.model_exists(dk):
                 dk.find_features(dataframe_train)
                 dk.find_labels(dataframe_train)
@@ -346,14 +344,13 @@ class IFreqaiModel(ABC):
                 except Exception as msg:
                     logger.warning(
                         f"Training {pair} raised exception {msg.__class__.__name__}. "
-                        f"Message: {msg}, skipping.", exc_info=True)
-                    self.model = None
+                        f"Message: {msg}, skipping.")

                 self.dd.pair_dict[pair]["trained_timestamp"] = int(
                     tr_train.stopts)
-                if self.plot_features and self.model is not None:
+                if self.plot_features:
                     plot_feature_importance(self.model, pair, dk, self.plot_features)
-                if self.save_backtest_models and self.model is not None:
+                if self.save_backtest_models:
                     logger.info('Saving backtest model to disk.')
                     self.dd.save_data(self.model, pair, dk)
                 else:
@@ -494,7 +491,7 @@ class IFreqaiModel(ABC):
                 "strategy is furnishing the same features as the pretrained"
                 "model. In case of --strategy-list, please be aware that FreqAI "
                 "requires all strategies to maintain identical "
-                "feature_engineering_* functions"
+                "populate_any_indicator() functions"
             )

     def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None:
@@ -572,9 +569,8 @@ class IFreqaiModel(ABC):
             file_type = ".joblib"
         elif self.dd.model_type == 'keras':
             file_type = ".h5"
-        elif self.dd.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
+        elif 'stable_baselines' in self.dd.model_type or 'sb3_contrib' == self.dd.model_type:
             file_type = ".zip"

         path_to_modelfile = Path(dk.data_path / f"{dk.model_filename}_model{file_type}")
         file_exists = path_to_modelfile.is_file()
         if file_exists:
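Both versions map model_type onto an on-disk suffix; develop simply folds "pytorch" into the ".zip" family. The mapping in isolation, as a sketch of the develop-side branch:

    def model_file_suffix(model_type: str) -> str:
        if model_type == "keras":
            return ".h5"
        if model_type in ("stable_baselines3", "sb3_contrib", "pytorch"):
            return ".zip"
        return ".joblib"  # default, e.g. sklearn-style models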
@@ -607,7 +603,7 @@ class IFreqaiModel(ABC):
         :param strategy: IStrategy = user defined strategy object
         :param dk: FreqaiDataKitchen = non-persistent data container for current coin/loop
         :param data_load_timerange: TimeRange = the amount of data to be loaded
-                                    for populating indicators
+                                    for populate_any_indicators
                                     (larger than new_trained_timerange so that
                                     new_trained_timerange does not contain any NaNs)
         """
@@ -813,7 +809,7 @@ class IFreqaiModel(ABC):
                 logger.warning("Couldn't cache corr_pair dataframes for improved performance. "
                                "Consider ensuring that the full coin/stake, e.g. XYZ/USD, "
                                "is included in the column names when you are creating features "
-                               "in `feature_engineering_*` functions.")
+                               "in `populate_any_indicators()`.")
                 self.get_corr_dataframes = not bool(self.corr_dataframes)
             elif self.corr_dataframes:
                 dataframe = dk.attach_corr_pair_columns(
@@ -940,6 +936,26 @@ class IFreqaiModel(ABC):
             dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred")
         return dk

+    def check_deprecated_populate_any_indicators(self, strategy: IStrategy):
+        """
+        Check and warn if the deprecated populate_any_indicators function is used.
+        :param strategy: strategy object
+        """
+
+        if not self.warned_deprecated_populate_any_indicators:
+            self.warned_deprecated_populate_any_indicators = True
+            old_version = inspect.getsource(strategy.populate_any_indicators) != (
+                inspect.getsource(IStrategy.populate_any_indicators))
+
+            if old_version:
+                logger.warning("DEPRECATION WARNING: "
+                               "You are using the deprecated populate_any_indicators function. "
+                               "This function will raise an error on March 1 2023. "
+                               "Please update your strategy by using "
+                               "the new feature_engineering functions. See \n"
+                               "https://www.freqtrade.io/en/latest/freqai-feature-engineering/"
+                               "for details.")
+
     # Following methods which are overridden by user made prediction models.
     # See freqai/prediction_models/CatboostPredictionModel.py for an example.
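The dependabot-side helper added above is a warn-once guard; the same pattern in isolation looks like this (standalone sketch, not freqtrade code):

    import logging

    logger = logging.getLogger(__name__)
    _warned = False


    def warn_once(message: str) -> None:
        # emit the warning only on the first call
        global _warned
        if not _warned:
            _warned = True
            logger.warning(message)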
@@ -14,20 +14,16 @@ logger = logging.getLogger(__name__)

 class CatboostClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         train_data = Pool(
@@ -15,20 +15,16 @@ logger = logging.getLogger(__name__)

 class CatboostClassifierMultiTarget(BaseClassifierModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         cbc = CatBoostClassifier(
@@ -14,20 +14,16 @@ logger = logging.getLogger(__name__)

 class CatboostRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         train_data = Pool(
@@ -15,20 +15,16 @@ logger = logging.getLogger(__name__)

 class CatboostRegressorMultiTarget(BaseRegressionModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         cbr = CatBoostRegressor(
@@ -12,20 +12,16 @@ logger = logging.getLogger(__name__)

 class LightGBMClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
@@ -13,20 +13,16 @@ logger = logging.getLogger(__name__)

 class LightGBMClassifierMultiTarget(BaseClassifierModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         lgb = LGBMClassifier(**self.model_training_parameters)
@@ -12,20 +12,18 @@ logger = logging.getLogger(__name__)

 class LightGBMRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
-        User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        Most regressors use the same function names and arguments e.g. user
+        can drop in LGBMRegressor in place of CatBoostRegressor and all data
+        management will be properly handled by Freqai.
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
@@ -13,20 +13,16 @@ logger = logging.getLogger(__name__)

 class LightGBMRegressorMultiTarget(BaseRegressionModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-        labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
         """

         lgb = LGBMRegressor(**self.model_training_parameters)
@@ -1,89 +0,0 @@
from typing import Any, Dict

import torch

from freqtrade.freqai.base_models.BasePyTorchClassifier import BasePyTorchClassifier
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
                                                         PyTorchDataConvertor)
from freqtrade.freqai.torch.PyTorchMLPModel import PyTorchMLPModel
from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchModelTrainer


class PyTorchMLPClassifier(BasePyTorchClassifier):
    """
    This class implements the fit method of IFreqaiModel.
    in the fit method we initialize the model and trainer objects.
    the only requirement from the model is to be aligned to PyTorchClassifier
    predict method that expects the model to predict a tensor of type long.

    parameters are passed via `model_training_parameters` under the freqai
    section in the config file. e.g:
    {
        ...
        "freqai": {
            ...
            "model_training_parameters" : {
                "learning_rate": 3e-4,
                "trainer_kwargs": {
                    "max_iters": 5000,
                    "batch_size": 64,
                    "max_n_eval_batches": null,
                },
                "model_kwargs": {
                    "hidden_dim": 512,
                    "dropout_percent": 0.2,
                    "n_layer": 1,
                },
            }
        }
    }
    """

    @property
    def data_convertor(self) -> PyTorchDataConvertor:
        return DefaultPyTorchDataConvertor(
            target_tensor_type=torch.long,
            squeeze_target_tensor=True
        )

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        config = self.freqai_info.get("model_training_parameters", {})
        self.learning_rate: float = config.get("learning_rate", 3e-4)
        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})

    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        User sets up the training and test data to fit their desired model here
        :param data_dictionary: the dictionary holding all data for train, test,
        labels, weights
        :param dk: The datakitchen object for the current coin/model
        :raises ValueError: If self.class_names is not defined in the parent class.
        """

        class_names = self.get_class_names()
        self.convert_label_column_to_int(data_dictionary, dk, class_names)
        n_features = data_dictionary["train_features"].shape[-1]
        model = PyTorchMLPModel(
            input_dim=n_features,
            output_dim=len(class_names),
            **self.model_kwargs
        )
        model.to(self.device)
        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
        criterion = torch.nn.CrossEntropyLoss()
        init_model = self.get_init_model(dk.pair)
        trainer = PyTorchModelTrainer(
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            model_meta_data={"class_names": class_names},
            device=self.device,
            init_model=init_model,
            data_convertor=self.data_convertor,
            **self.trainer_kwargs,
        )
        trainer.fit(data_dictionary, self.splits)
        return trainer
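As the classifier's fit() docstring warns, class_names must be furnished by the strategy before training. A hedged sketch of the strategy side (the column name and prediction horizon are illustrative):

    import numpy as np

    # sketch of a strategy-side target definition for this classifier
    def set_freqai_targets(self, dataframe, **kwargs):
        self.freqai.class_names = ["down", "up"]  # required before get_class_names()
        dataframe["&s-up_or_down"] = np.where(
            dataframe["close"].shift(-50) > dataframe["close"], "up", "down")
        return dataframe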
@@ -1,83 +0,0 @@
-from typing import Any, Dict
-
-import torch
-
-from freqtrade.freqai.base_models.BasePyTorchRegressor import BasePyTorchRegressor
-from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
-from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
-                                                         PyTorchDataConvertor)
-from freqtrade.freqai.torch.PyTorchMLPModel import PyTorchMLPModel
-from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchModelTrainer
-
-
-class PyTorchMLPRegressor(BasePyTorchRegressor):
-    """
-    This class implements the fit method of IFreqaiModel.
-    in the fit method we initialize the model and trainer objects.
-    the only requirement from the model is to be aligned to PyTorchRegressor
-    predict method that expects the model to predict tensor of type float.
-    the trainer defines the training loop.
-
-    parameters are passed via `model_training_parameters` under the freqai
-    section in the config file. e.g:
-    {
-        ...
-        "freqai": {
-            ...
-            "model_training_parameters" : {
-                "learning_rate": 3e-4,
-                "trainer_kwargs": {
-                    "max_iters": 5000,
-                    "batch_size": 64,
-                    "max_n_eval_batches": null,
-                },
-                "model_kwargs": {
-                    "hidden_dim": 512,
-                    "dropout_percent": 0.2,
-                    "n_layer": 1,
-                },
-            }
-        }
-    }
-    """
-
-    @property
-    def data_convertor(self) -> PyTorchDataConvertor:
-        return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(**kwargs)
-        config = self.freqai_info.get("model_training_parameters", {})
-        self.learning_rate: float = config.get("learning_rate", 3e-4)
-        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
-        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
-
-    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
-        """
-        User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-            labels, weights
-        :param dk: The datakitchen object for the current coin/model
-        """
-
-        n_features = data_dictionary["train_features"].shape[-1]
-        model = PyTorchMLPModel(
-            input_dim=n_features,
-            output_dim=1,
-            **self.model_kwargs
-        )
-        model.to(self.device)
-        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
-        criterion = torch.nn.MSELoss()
-        init_model = self.get_init_model(dk.pair)
-        trainer = PyTorchModelTrainer(
-            model=model,
-            optimizer=optimizer,
-            criterion=criterion,
-            device=self.device,
-            init_model=init_model,
-            data_convertor=self.data_convertor,
-            **self.trainer_kwargs,
-        )
-        trainer.fit(data_dictionary, self.splits)
-        return trainer
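For orientation, the regressor's fit() above boils down to standard PyTorch training wiring. A freqtrade-independent sketch with made-up shapes (hidden_dim=512 and lr=3e-4 mirror the config defaults shown in the docstring):

import torch

n_features, batch = 10, 64
model = torch.nn.Sequential(
    torch.nn.Linear(n_features, 512),
    torch.nn.ReLU(),
    torch.nn.Linear(512, 1),  # output_dim=1 for single-target regression
)
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
criterion = torch.nn.MSELoss()

# One optimizer step on random data, exactly the loop body the trainer runs.
x, y = torch.randn(batch, n_features), torch.randn(batch, 1)
loss = criterion(model(x), y)
optimizer.zero_grad(set_to_none=True)
loss.backward()
optimizer.step()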
@@ -100,7 +100,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
             """
             # first, penalize if the action is not valid
             if not self._is_valid(action):
-                self.tensorboard_log("invalid", category="actions")
+                self.tensorboard_log("is_valid")
                 return -2

             pnl = self.get_unrealized_profit()
@@ -18,20 +18,16 @@ logger = logging.getLogger(__name__)

 class XGBoostClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-            labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+            all the training and test data/labels.
         """

         X = data_dictionary["train_features"].to_numpy()
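The develop-side docstring describes overriding fit() on top of the base model; the body that follows (X = data_dictionary["train_features"].to_numpy() ...) feeds straight into XGBoost. A standalone sketch of that pattern with random data (illustrative shapes and hyperparameters, not freqtrade's exact call):

import numpy as np
from xgboost import XGBClassifier

X = np.random.rand(200, 8)
y = np.random.randint(0, 2, 200)
model = XGBClassifier(n_estimators=50)
# eval_set mirrors how the test split is passed alongside the train data.
model.fit(X, y, eval_set=[(X, y)], verbose=False)
print(model.predict(X[:3]))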
@@ -18,20 +18,16 @@ logger = logging.getLogger(__name__)

 class XGBoostRFClassifier(BaseClassifierModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-            labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+            all the training and test data/labels.
         """

         X = data_dictionary["train_features"].to_numpy()
@@ -12,20 +12,16 @@ logger = logging.getLogger(__name__)

 class XGBoostRFRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-            labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+            all the training and test data/labels.
         """

         X = data_dictionary["train_features"]
@@ -12,20 +12,16 @@ logger = logging.getLogger(__name__)

 class XGBoostRegressor(BaseRegressionModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-            labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+            all the training and test data/labels.
         """

         X = data_dictionary["train_features"]
@@ -13,20 +13,16 @@ logger = logging.getLogger(__name__)

 class XGBoostRegressorMultiTarget(BaseRegressionModel):
     """
-    User created prediction model. The class inherits IFreqaiModel, which
-    means it has full access to all Frequency AI functionality. Typically,
-    users would use this to override the common `fit()`, `train()`, or
-    `predict()` methods to add their custom data handling tools or change
-    various aspects of the training that cannot be configured via the
-    top level config.json file.
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
     """

     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
-        :param data_dictionary: the dictionary holding all data for train, test,
-            labels, weights
-        :param dk: The datakitchen object for the current coin/model
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+            all the training and test data/labels.
         """

         xgb = XGBRegressor(**self.model_training_parameters)
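The multi-target variant fits one booster per label column. A hedged stand-in using scikit-learn's MultiOutputRegressor (freqtrade wires its own multi-output wrapper around XGBRegressor, but the shape of the idea is the same):

import numpy as np
from sklearn.multioutput import MultiOutputRegressor
from xgboost import XGBRegressor

X = np.random.rand(100, 5)
y = np.random.rand(100, 3)  # three target columns
model = MultiOutputRegressor(XGBRegressor(n_estimators=25)).fit(X, y)
print(model.predict(X[:2]).shape)  # (2, 3): one prediction per target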
@@ -1,67 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import List, Optional
-
-import pandas as pd
-import torch
-
-
-class PyTorchDataConvertor(ABC):
-    """
-    This class is responsible for converting `*_features` & `*_labels` pandas dataframes
-    to pytorch tensors.
-    """
-
-    @abstractmethod
-    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
-        """
-        :param df: "*_features" dataframe.
-        :param device: The device to use for training (e.g. 'cpu', 'cuda').
-        """
-
-    @abstractmethod
-    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
-        """
-        :param df: "*_labels" dataframe.
-        :param device: The device to use for training (e.g. 'cpu', 'cuda').
-        """
-
-
-class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
-    """
-    A default conversion that keeps features dataframe shapes.
-    """
-
-    def __init__(
-            self,
-            target_tensor_type: Optional[torch.dtype] = None,
-            squeeze_target_tensor: bool = False
-    ):
-        """
-        :param target_tensor_type: type of target tensor, for classification use
-            torch.long, for regressor use torch.float or torch.double.
-        :param squeeze_target_tensor: controls the target shape, used for loss functions
-            that requires 0D or 1D.
-        """
-        self._target_tensor_type = target_tensor_type
-        self._squeeze_target_tensor = squeeze_target_tensor
-
-    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
-        x = torch.from_numpy(df.values).float()
-        if device:
-            x = x.to(device)
-
-        return [x]
-
-    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
-        y = torch.from_numpy(df.values)
-
-        if self._target_tensor_type:
-            y = y.to(self._target_tensor_type)
-
-        if self._squeeze_target_tensor:
-            y = y.squeeze()
-
-        if device:
-            y = y.to(device)
-
-        return [y]
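A quick behavioral check of the DefaultPyTorchDataConvertor defined above (illustrative values; torch.long plus squeezing is the classification setup its docstring mentions):

import pandas as pd
import torch

convertor = DefaultPyTorchDataConvertor(target_tensor_type=torch.long,
                                        squeeze_target_tensor=True)
# A (3, 1) label frame becomes a 1-D int64 tensor, ready for CrossEntropyLoss.
y = convertor.convert_y(pd.DataFrame({"label": [0, 1, 1]}))[0]
print(y.dtype, y.shape)  # torch.int64 torch.Size([3])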
@@ -1,97 +0,0 @@
-import logging
-from typing import List
-
-import torch
-from torch import nn
-
-
-logger = logging.getLogger(__name__)
-
-
-class PyTorchMLPModel(nn.Module):
-    """
-    A multi-layer perceptron (MLP) model implemented using PyTorch.
-
-    This class mainly serves as a simple example for the integration of PyTorch model's
-    to freqai. It is not optimized at all and should not be used for production purposes.
-
-    :param input_dim: The number of input features. This parameter specifies the number
-        of features in the input data that the MLP will use to make predictions.
-    :param output_dim: The number of output classes. This parameter specifies the number
-        of classes that the MLP will predict.
-    :param hidden_dim: The number of hidden units in each layer. This parameter controls
-        the complexity of the MLP and determines how many nonlinear relationships the MLP
-        can represent. Increasing the number of hidden units can increase the capacity of
-        the MLP to model complex patterns, but it also increases the risk of overfitting
-        the training data. Default: 256
-    :param dropout_percent: The dropout rate for regularization. This parameter specifies
-        the probability of dropping out a neuron during training to prevent overfitting.
-        The dropout rate should be tuned carefully to balance between underfitting and
-        overfitting. Default: 0.2
-    :param n_layer: The number of layers in the MLP. This parameter specifies the number
-        of layers in the MLP architecture. Adding more layers to the MLP can increase its
-        capacity to model complex patterns, but it also increases the risk of overfitting
-        the training data. Default: 1
-
-    :returns: The output of the MLP, with shape (batch_size, output_dim)
-    """
-
-    def __init__(self, input_dim: int, output_dim: int, **kwargs):
-        super().__init__()
-        hidden_dim: int = kwargs.get("hidden_dim", 256)
-        dropout_percent: int = kwargs.get("dropout_percent", 0.2)
-        n_layer: int = kwargs.get("n_layer", 1)
-        self.input_layer = nn.Linear(input_dim, hidden_dim)
-        self.blocks = nn.Sequential(*[Block(hidden_dim, dropout_percent) for _ in range(n_layer)])
-        self.output_layer = nn.Linear(hidden_dim, output_dim)
-        self.relu = nn.ReLU()
-        self.dropout = nn.Dropout(p=dropout_percent)
-
-    def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
-        x: torch.Tensor = tensors[0]
-        x = self.relu(self.input_layer(x))
-        x = self.dropout(x)
-        x = self.blocks(x)
-        x = self.output_layer(x)
-        return x
-
-
-class Block(nn.Module):
-    """
-    A building block for a multi-layer perceptron (MLP).
-
-    :param hidden_dim: The number of hidden units in the feedforward network.
-    :param dropout_percent: The dropout rate for regularization.
-
-    :returns: torch.Tensor. with shape (batch_size, hidden_dim)
-    """
-
-    def __init__(self, hidden_dim: int, dropout_percent: int):
-        super().__init__()
-        self.ff = FeedForward(hidden_dim)
-        self.dropout = nn.Dropout(p=dropout_percent)
-        self.ln = nn.LayerNorm(hidden_dim)
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        x = self.ff(self.ln(x))
-        x = self.dropout(x)
-        return x
-
-
-class FeedForward(nn.Module):
-    """
-    A simple fully-connected feedforward neural network block.
-
-    :param hidden_dim: The number of hidden units in the block.
-    :return: torch.Tensor. with shape (batch_size, hidden_dim)
-    """
-
-    def __init__(self, hidden_dim: int):
-        super().__init__()
-        self.net = nn.Sequential(
-            nn.Linear(hidden_dim, hidden_dim),
-            nn.ReLU(),
-        )
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        return self.net(x)
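Shape check for the MLP above (illustrative sizes; note that forward() takes a list of tensors, matching what PyTorchDataConvertor.convert_x returns):

import torch

model = PyTorchMLPModel(input_dim=10, output_dim=3, hidden_dim=64, n_layer=2)
logits = model([torch.randn(8, 10)])  # list-wrapped batch of 8 samples
print(logits.shape)  # torch.Size([8, 3])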
@@ -1,208 +0,0 @@
-import logging
-import math
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-
-import pandas as pd
-import torch
-from torch import nn
-from torch.optim import Optimizer
-from torch.utils.data import DataLoader, TensorDataset
-
-from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor
-from freqtrade.freqai.torch.PyTorchTrainerInterface import PyTorchTrainerInterface
-
-
-logger = logging.getLogger(__name__)
-
-
-class PyTorchModelTrainer(PyTorchTrainerInterface):
-    def __init__(
-            self,
-            model: nn.Module,
-            optimizer: Optimizer,
-            criterion: nn.Module,
-            device: str,
-            init_model: Dict,
-            data_convertor: PyTorchDataConvertor,
-            model_meta_data: Dict[str, Any] = {},
-            **kwargs
-    ):
-        """
-        :param model: The PyTorch model to be trained.
-        :param optimizer: The optimizer to use for training.
-        :param criterion: The loss function to use for training.
-        :param device: The device to use for training (e.g. 'cpu', 'cuda').
-        :param init_model: A dictionary containing the initial model/optimizer
-            state_dict and model_meta_data saved by self.save() method.
-        :param model_meta_data: Additional metadata about the model (optional).
-        :param data_convertor: convertor from pd.DataFrame to torch.tensor.
-        :param max_iters: The number of training iterations to run.
-            iteration here refers to the number of times we call
-            self.optimizer.step(). used to calculate n_epochs.
-        :param batch_size: The size of the batches to use during training.
-        :param max_n_eval_batches: The maximum number batches to use for evaluation.
-        """
-        self.model = model
-        self.optimizer = optimizer
-        self.criterion = criterion
-        self.model_meta_data = model_meta_data
-        self.device = device
-        self.max_iters: int = kwargs.get("max_iters", 100)
-        self.batch_size: int = kwargs.get("batch_size", 64)
-        self.max_n_eval_batches: Optional[int] = kwargs.get("max_n_eval_batches", None)
-        self.data_convertor = data_convertor
-        if init_model:
-            self.load_from_checkpoint(init_model)
-
-    def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]):
-        """
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-            all the training and test data/labels.
-        :param splits: splits to use in training, splits must contain "train",
-            optional "test" could be added by setting freqai.data_split_parameters.test_size > 0
-            in the config file.
-
-        - Calculates the predicted output for the batch using the PyTorch model.
-        - Calculates the loss between the predicted and actual output using a loss function.
-        - Computes the gradients of the loss with respect to the model's parameters using
-          backpropagation.
-        - Updates the model's parameters using an optimizer.
-        """
-        data_loaders_dictionary = self.create_data_loaders_dictionary(data_dictionary, splits)
-        epochs = self.calc_n_epochs(
-            n_obs=len(data_dictionary["train_features"]),
-            batch_size=self.batch_size,
-            n_iters=self.max_iters
-        )
-        for epoch in range(1, epochs + 1):
-            # training
-            losses = []
-            for i, batch_data in enumerate(data_loaders_dictionary["train"]):
-
-                for tensor in batch_data:
-                    tensor.to(self.device)
-
-                xb = batch_data[:-1]
-                yb = batch_data[-1]
-                yb_pred = self.model(xb)
-                loss = self.criterion(yb_pred, yb)
-
-                self.optimizer.zero_grad(set_to_none=True)
-                loss.backward()
-                self.optimizer.step()
-                losses.append(loss.item())
-            train_loss = sum(losses) / len(losses)
-            log_message = f"epoch {epoch}/{epochs}: train loss {train_loss:.4f}"
-
-            # evaluation
-            if "test" in splits:
-                test_loss = self.estimate_loss(
-                    data_loaders_dictionary,
-                    self.max_n_eval_batches,
-                    "test"
-                )
-                log_message += f" ; test loss {test_loss:.4f}"
-
-            logger.info(log_message)
-
-    @torch.no_grad()
-    def estimate_loss(
-            self,
-            data_loader_dictionary: Dict[str, DataLoader],
-            max_n_eval_batches: Optional[int],
-            split: str,
-    ) -> float:
-        self.model.eval()
-        n_batches = 0
-        losses = []
-        for i, batch_data in enumerate(data_loader_dictionary[split]):
-            if max_n_eval_batches and i > max_n_eval_batches:
-                n_batches += 1
-                break
-
-            for tensor in batch_data:
-                tensor.to(self.device)
-
-            xb = batch_data[:-1]
-            yb = batch_data[-1]
-            yb_pred = self.model(xb)
-            loss = self.criterion(yb_pred, yb)
-            losses.append(loss.item())
-
-        self.model.train()
-        return sum(losses) / len(losses)
-
-    def create_data_loaders_dictionary(
-            self,
-            data_dictionary: Dict[str, pd.DataFrame],
-            splits: List[str]
-    ) -> Dict[str, DataLoader]:
-        """
-        Converts the input data to PyTorch tensors using a data loader.
-        """
-        data_loader_dictionary = {}
-        for split in splits:
-            x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"])
-            y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"])
-            dataset = TensorDataset(*x, *y)
-            data_loader = DataLoader(
-                dataset,
-                batch_size=self.batch_size,
-                shuffle=True,
-                drop_last=True,
-                num_workers=0,
-            )
-            data_loader_dictionary[split] = data_loader
-
-        return data_loader_dictionary
-
-    @staticmethod
-    def calc_n_epochs(n_obs: int, batch_size: int, n_iters: int) -> int:
-        """
-        Calculates the number of epochs required to reach the maximum number
-        of iterations specified in the model training parameters.
-
-        the motivation here is that `max_iters` is easier to optimize and keep stable,
-        across different n_obs - the number of data points.
-        """
-
-        n_batches = math.ceil(n_obs // batch_size)
-        epochs = math.ceil(n_iters // n_batches)
-        if epochs <= 10:
-            logger.warning("User set `max_iters` in such a way that the trainer will only perform "
-                           f" {epochs} epochs. Please consider increasing this value accordingly")
-        if epochs <= 1:
-            logger.warning("Epochs set to 1. Please review your `max_iters` value")
-            epochs = 1
-        return epochs
-
-    def save(self, path: Path):
-        """
-        - Saving any nn.Module state_dict
-        - Saving model_meta_data, this dict should contain any additional data that the
-          user needs to store. e.g class_names for classification models.
-        """
-
-        torch.save({
-            "model_state_dict": self.model.state_dict(),
-            "optimizer_state_dict": self.optimizer.state_dict(),
-            "model_meta_data": self.model_meta_data,
-            "pytrainer": self
-        }, path)
-
-    def load(self, path: Path):
-        checkpoint = torch.load(path)
-        return self.load_from_checkpoint(checkpoint)
-
-    def load_from_checkpoint(self, checkpoint: Dict):
-        """
-        when using continual_learning, DataDrawer will load the dictionary
-        (containing state dicts and model_meta_data) by calling torch.load(path).
-        you can access this dict from any class that inherits IFreqaiModel by calling
-        get_init_model method.
-        """
-        self.model.load_state_dict(checkpoint["model_state_dict"])
-        self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
-        self.model_meta_data = checkpoint["model_meta_data"]
-        return self
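Worked example for calc_n_epochs above (values are illustrative; note both divisions are floor divisions, so the surrounding math.ceil calls are effectively no-ops on the already-truncated integers):

import math

n_obs, batch_size, n_iters = 6400, 64, 5000
n_batches = math.ceil(n_obs // batch_size)  # 6400 // 64 = 100
epochs = math.ceil(n_iters // n_batches)    # 5000 // 100 = 50
print(n_batches, epochs)  # 100 50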
@@ -1,53 +0,0 @@
-from abc import ABC, abstractmethod
-from pathlib import Path
-from typing import Dict, List
-
-import pandas as pd
-import torch
-from torch import nn
-
-
-class PyTorchTrainerInterface(ABC):
-
-    @abstractmethod
-    def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]) -> None:
-        """
-        :param data_dictionary: the dictionary constructed by DataHandler to hold
-            all the training and test data/labels.
-        :param splits: splits to use in training, splits must contain "train",
-            optional "test" could be added by setting freqai.data_split_parameters.test_size > 0
-            in the config file.
-
-        - Calculates the predicted output for the batch using the PyTorch model.
-        - Calculates the loss between the predicted and actual output using a loss function.
-        - Computes the gradients of the loss with respect to the model's parameters using
-          backpropagation.
-        - Updates the model's parameters using an optimizer.
-        """
-
-    @abstractmethod
-    def save(self, path: Path) -> None:
-        """
-        - Saving any nn.Module state_dict
-        - Saving model_meta_data, this dict should contain any additional data that the
-          user needs to store. e.g class_names for classification models.
-        """
-
-    def load(self, path: Path) -> nn.Module:
-        """
-        :param path: path to zip file.
-        :returns: pytorch model.
-        """
-        checkpoint = torch.load(path)
-        return self.load_from_checkpoint(checkpoint)
-
-    @abstractmethod
-    def load_from_checkpoint(self, checkpoint: Dict) -> nn.Module:
-        """
-        when using continual_learning, DataDrawer will load the dictionary
-        (containing state dicts and model_meta_data) by calling torch.load(path).
-        you can access this dict from any class that inherits IFreqaiModel by calling
-        get_init_model method.
-        :param checkpoint: dict containing the model & optimizer state dicts,
-            model_meta_data, etc..
-        """
@@ -211,7 +211,7 @@ def record_params(config: Dict[str, Any], full_path: Path) -> None:
         "pairs": config.get('exchange', {}).get('pair_whitelist')
     }

-    with params_record_path.open("w") as handle:
+    with open(params_record_path, "w") as handle:
         rapidjson.dump(
             run_params,
             handle,
@@ -21,19 +21,15 @@ from freqtrade.enums import (ExitCheckTuple, ExitType, RPCMessageType, RunMode,
                              State, TradingMode)
 from freqtrade.exceptions import (DependencyException, ExchangeError, InsufficientFundsError,
                                   InvalidOrderException, PricingError)
-from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, timeframe_to_minutes, timeframe_to_next_date,
-                                timeframe_to_seconds)
+from freqtrade.exchange import timeframe_to_minutes, timeframe_to_next_date, timeframe_to_seconds
 from freqtrade.misc import safe_value_fallback, safe_value_fallback2
 from freqtrade.mixins import LoggingMixin
 from freqtrade.persistence import Order, PairLocks, Trade, init_db
-from freqtrade.persistence.key_value_store import set_startup_time
 from freqtrade.plugins.pairlistmanager import PairListManager
 from freqtrade.plugins.protectionmanager import ProtectionManager
 from freqtrade.resolvers import ExchangeResolver, StrategyResolver
 from freqtrade.rpc import RPCManager
 from freqtrade.rpc.external_message_consumer import ExternalMessageConsumer
-from freqtrade.rpc.rpc_types import (RPCBuyMsg, RPCCancelMsg, RPCProtectionMsg, RPCSellCancelMsg,
-                                     RPCSellMsg)
 from freqtrade.strategy.interface import IStrategy
 from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper
 from freqtrade.util import FtPrecise
@@ -131,19 +127,19 @@ class FreqtradeBot(LoggingMixin):
         for minutes in [0, 15, 30, 45]:
             t = str(time(time_slot, minutes, 2))
             self._schedule.every().day.at(t).do(update)
-        self.last_process: Optional[datetime] = None
+        self.last_process = datetime(1970, 1, 1, tzinfo=timezone.utc)

         self.strategy.ft_bot_start()
         # Initialize protections AFTER bot start - otherwise parameters are not loaded.
         self.protections = ProtectionManager(self.config, self.strategy.protections)

-    def notify_status(self, msg: str, msg_type=RPCMessageType.STATUS) -> None:
+    def notify_status(self, msg: str) -> None:
         """
         Public method for users of this class (worker, etc.) to send notifications
         via RPC about changes in the bot status.
         """
         self.rpc.send_msg({
-            'type': msg_type,
+            'type': RPCMessageType.STATUS,
             'status': msg
         })

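The develop-side signature lets callers pick the RPC message type instead of hard-coding STATUS. Illustrative usage only (assumes a FreqtradeBot instance named bot; RPCMessageType.WARNING is used here purely as an example type):

bot.notify_status("Config reloaded")  # defaults to RPCMessageType.STATUS
bot.notify_status("Running in dry-run mode", msg_type=RPCMessageType.WARNING)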
@@ -183,7 +179,6 @@ class FreqtradeBot(LoggingMixin):
         performs startup tasks
         """
         migrate_binance_futures_names(self.config)
-        set_startup_time()

         self.rpc.startup_messages(self.config, self.pairlists, self.protections)
         # Update older trades with precision and precision mode
@@ -217,8 +212,7 @@ class FreqtradeBot(LoggingMixin):
         self.dataprovider.refresh(self.pairlists.create_pair_list(self.active_pair_whitelist),
                                   self.strategy.gather_informative_pairs())

-        strategy_safe_wrapper(self.strategy.bot_loop_start, supress_error=True)(
-            current_time=datetime.now(timezone.utc))
+        strategy_safe_wrapper(self.strategy.bot_loop_start, supress_error=True)()

         self.strategy.analyze(self.active_pair_whitelist)

@@ -592,7 +586,7 @@ class FreqtradeBot(LoggingMixin):

         min_entry_stake = self.exchange.get_min_pair_stake_amount(trade.pair,
                                                                   current_entry_rate,
-                                                                  0.0)
+                                                                  self.strategy.stoploss)
         min_exit_stake = self.exchange.get_min_pair_stake_amount(trade.pair,
                                                                  current_exit_rate,
                                                                  self.strategy.stoploss)
@@ -600,7 +594,7 @@ class FreqtradeBot(LoggingMixin):
         stake_available = self.wallets.get_available_stake_amount()
         logger.debug(f"Calling adjust_trade_position for pair {trade.pair}")
         stake_amount = strategy_safe_wrapper(self.strategy.adjust_trade_position,
-                                             default_retval=None, supress_error=True)(
+                                             default_retval=None)(
             trade=trade,
             current_time=datetime.now(timezone.utc), current_rate=current_entry_rate,
             current_profit=current_entry_profit, min_stake=min_entry_stake,
@@ -639,7 +633,7 @@ class FreqtradeBot(LoggingMixin):
             return

         remaining = (trade.amount - amount) * current_exit_rate
-        if min_exit_stake and remaining < min_exit_stake:
+        if remaining < min_exit_stake:
             logger.info(f"Remaining amount of {remaining} would be smaller "
                         f"than the minimum of {min_exit_stake}.")
             return
@@ -706,8 +700,7 @@ class FreqtradeBot(LoggingMixin):
         pos_adjust = trade is not None

         enter_limit_requested, stake_amount, leverage = self.get_valid_enter_price_and_stake(
-            pair, price, stake_amount, trade_side, enter_tag, trade, order_adjust, leverage_,
-            pos_adjust)
+            pair, price, stake_amount, trade_side, enter_tag, trade, order_adjust, leverage_)

         if not stake_amount:
             return False
@@ -816,9 +809,6 @@ class FreqtradeBot(LoggingMixin):
                 precision_mode=self.exchange.precisionMode,
                 contract_size=self.exchange.get_contract_size(pair),
             )
-            stoploss = self.strategy.stoploss if not self.edge else self.edge.get_stoploss(pair)
-            trade.adjust_stop_loss(trade.open_rate, stoploss, initial=True)
-
         else:
             # This is additional buy, we reset fee_open_currency so timeout checking can work
             trade.is_open = True
@@ -828,7 +818,7 @@ class FreqtradeBot(LoggingMixin):

         trade.orders.append(order_obj)
         trade.recalc_trade_from_orders()
-        Trade.session.add(trade)
+        Trade.query.session.add(trade)
         Trade.commit()

         # Updating wallets
@@ -851,18 +841,16 @@ class FreqtradeBot(LoggingMixin):

     def cancel_stoploss_on_exchange(self, trade: Trade) -> Trade:
         # First cancelling stoploss on exchange ...
-        if trade.stoploss_order_id:
+        if self.strategy.order_types.get('stoploss_on_exchange') and trade.stoploss_order_id:
             try:
                 logger.info(f"Canceling stoploss on exchange for {trade}")
                 co = self.exchange.cancel_stoploss_order_with_result(
                     trade.stoploss_order_id, trade.pair, trade.amount)
-                self.update_trade_state(trade, trade.stoploss_order_id, co, stoploss_order=True)
+                trade.update_order(co)

                 # Reset stoploss order id.
                 trade.stoploss_order_id = None
             except InvalidOrderException:
-                logger.exception(f"Could not cancel stoploss order {trade.stoploss_order_id} "
-                                 f"for pair {trade.pair}")
+                logger.exception(f"Could not cancel stoploss order {trade.stoploss_order_id}")
         return trade

     def get_valid_enter_price_and_stake(
@@ -872,12 +860,7 @@ class FreqtradeBot(LoggingMixin):
         trade: Optional[Trade],
         order_adjust: bool,
         leverage_: Optional[float],
-        pos_adjust: bool,
     ) -> Tuple[float, float, float]:
-        """
-        Validate and eventually adjust (within limits) limit, amount and leverage
-        :return: Tuple with (price, amount, leverage)
-        """

         if price:
             enter_limit_requested = price
@@ -923,9 +906,7 @@ class FreqtradeBot(LoggingMixin):
             # We do however also need min-stake to determine leverage, therefore this is ignored as
             # edge-case for now.
             min_stake_amount = self.exchange.get_min_pair_stake_amount(
-                pair, enter_limit_requested,
-                self.strategy.stoploss if not pos_adjust else 0.0,
-                leverage)
+                pair, enter_limit_requested, self.strategy.stoploss, leverage)
             max_stake_amount = self.exchange.get_max_pair_stake_amount(
                 pair, enter_limit_requested, leverage)

@@ -949,11 +930,12 @@ class FreqtradeBot(LoggingMixin):

         return enter_limit_requested, stake_amount, leverage

-    def _notify_enter(self, trade: Trade, order: Order, order_type: str,
+    def _notify_enter(self, trade: Trade, order: Order, order_type: Optional[str] = None,
                       fill: bool = False, sub_trade: bool = False) -> None:
         """
         Sends rpc notification when a entry order occurred.
         """
+        msg_type = RPCMessageType.ENTRY_FILL if fill else RPCMessageType.ENTRY
         open_rate = order.safe_price

         if open_rate is None:
@@ -964,9 +946,9 @@ class FreqtradeBot(LoggingMixin):
             current_rate = self.exchange.get_rate(
                 trade.pair, side='entry', is_short=trade.is_short, refresh=False)

-        msg: RPCBuyMsg = {
+        msg = {
             'trade_id': trade.id,
-            'type': RPCMessageType.ENTRY_FILL if fill else RPCMessageType.ENTRY,
+            'type': msg_type,
             'buy_tag': trade.enter_tag,
             'enter_tag': trade.enter_tag,
             'exchange': trade.exchange.capitalize(),
@@ -978,7 +960,6 @@ class FreqtradeBot(LoggingMixin):
             'order_type': order_type,
             'stake_amount': trade.stake_amount,
             'stake_currency': self.config['stake_currency'],
-            'base_currency': self.exchange.get_pair_base_currency(trade.pair),
             'fiat_currency': self.config.get('fiat_display_currency', None),
             'amount': order.safe_amount_after_fee if fill else (order.amount or trade.amount),
             'open_date': trade.open_date or datetime.utcnow(),
@@ -997,7 +978,7 @@ class FreqtradeBot(LoggingMixin):
         current_rate = self.exchange.get_rate(
             trade.pair, side='entry', is_short=trade.is_short, refresh=False)

-        msg: RPCCancelMsg = {
+        msg = {
             'trade_id': trade.id,
             'type': RPCMessageType.ENTRY_CANCEL,
             'buy_tag': trade.enter_tag,
@@ -1009,9 +990,7 @@ class FreqtradeBot(LoggingMixin):
             'limit': trade.open_rate,
             'order_type': order_type,
             'stake_amount': trade.stake_amount,
-            'open_rate': trade.open_rate,
             'stake_currency': self.config['stake_currency'],
-            'base_currency': self.exchange.get_pair_base_currency(trade.pair),
             'fiat_currency': self.config.get('fiat_display_currency', None),
             'amount': trade.amount,
             'open_date': trade.open_date,
@@ -1034,16 +1013,12 @@ class FreqtradeBot(LoggingMixin):
         trades_closed = 0
         for trade in trades:
             try:
-                try:
-                    if (self.strategy.order_types.get('stoploss_on_exchange') and
-                            self.handle_stoploss_on_exchange(trade)):
-                        trades_closed += 1
-                        Trade.commit()
-                        continue
-
-                except InvalidOrderException as exception:
-                    logger.warning(
-                        f'Unable to handle stoploss on exchange for {trade.pair}: {exception}')
+                if (self.strategy.order_types.get('stoploss_on_exchange') and
+                        self.handle_stoploss_on_exchange(trade)):
+                    trades_closed += 1
+                    Trade.commit()
+                    continue
                 # Check if we can sell our current pair
                 if trade.open_order_id is None and trade.is_open and self.handle_trade(trade):
                     trades_closed += 1
@@ -1147,7 +1122,8 @@ class FreqtradeBot(LoggingMixin):
             trade.stoploss_order_id = None
             logger.error(f'Unable to place a stoploss order on exchange. {e}')
             logger.warning('Exiting the trade forcefully')
-            self.emergency_exit(trade, stop_price)
+            self.execute_trade_exit(trade, stop_price, exit_check=ExitCheckTuple(
+                exit_type=ExitType.EMERGENCY_EXIT))

         except ExchangeError:
             trade.stoploss_order_id = None
@@ -1175,8 +1151,7 @@ class FreqtradeBot(LoggingMixin):
             logger.warning('Unable to fetch stoploss order: %s', exception)

         if stoploss_order:
-            self.update_trade_state(trade, trade.stoploss_order_id, stoploss_order,
-                                    stoploss_order=True)
+            trade.update_order(stoploss_order)

         # We check if stoploss order is fulfilled
         if stoploss_order and stoploss_order['status'] in ('closed', 'triggered'):
@@ -1240,9 +1215,7 @@ class FreqtradeBot(LoggingMixin):
         :param order: Current on exchange stoploss order
         :return: None
         """
-        stoploss_norm = self.exchange.price_to_precision(
-            trade.pair, trade.stoploss_or_liquidation,
-            rounding_mode=ROUND_DOWN if trade.is_short else ROUND_UP)
+        stoploss_norm = self.exchange.price_to_precision(trade.pair, trade.stoploss_or_liquidation)

         if self.exchange.stoploss_adjust(stoploss_norm, order, side=trade.exit_side):
             # we check if the update is necessary
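The develop side rounds the normalized stop price with a side-dependent mode: ROUND_UP for longs, ROUND_DOWN for shorts. A toy illustration of those two directions with a made-up 0.5 price step:

import math

price_step, stop = 0.5, 100.2
rounded_long = math.ceil(stop / price_step) * price_step    # 100.5 (ROUND_UP)
rounded_short = math.floor(stop / price_step) * price_step  # 100.0 (ROUND_DOWN)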
@@ -1252,8 +1225,13 @@ class FreqtradeBot(LoggingMixin):
             # cancelling the current stoploss on exchange first
             logger.info(f"Cancelling current stoploss on exchange for pair {trade.pair} "
                         f"(orderid:{order['id']}) in order to add another one ...")
-            self.cancel_stoploss_on_exchange(trade)
+            try:
+                co = self.exchange.cancel_stoploss_order_with_result(order['id'], trade.pair,
+                                                                     trade.amount)
+                trade.update_order(co)
+            except InvalidOrderException:
+                logger.exception(f"Could not cancel stoploss order {order['id']} "
+                                 f"for pair {trade.pair}")

             # Create new stoploss order
             if not self.create_stoploss_order(trade=trade, stop_price=stoploss_norm):
@@ -1297,22 +1275,20 @@ class FreqtradeBot(LoggingMixin):
         if order['side'] == trade.entry_side:
             self.handle_cancel_enter(trade, order, reason)
         else:
-            canceled = self.handle_cancel_exit(trade, order, reason)
+            canceled = self.handle_cancel_exit(
+                trade, order, reason)
             canceled_count = trade.get_exit_order_count()
             max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0)
             if canceled and max_timeouts > 0 and canceled_count >= max_timeouts:
                 logger.warning(f'Emergency exiting trade {trade}, as the exit order '
                                f'timed out {max_timeouts} times.')
-                self.emergency_exit(trade, order['price'])
-
-    def emergency_exit(self, trade: Trade, price: float) -> None:
-        try:
-            self.execute_trade_exit(
-                trade, price,
-                exit_check=ExitCheckTuple(exit_type=ExitType.EMERGENCY_EXIT))
-        except DependencyException as exception:
-            logger.warning(
-                f'Unable to emergency exit trade {trade.pair}: {exception}')
+                try:
+                    self.execute_trade_exit(
+                        trade, order['price'],
+                        exit_check=ExitCheckTuple(exit_type=ExitType.EMERGENCY_EXIT))
+                except DependencyException as exception:
+                    logger.warning(
+                        f'Unable to emergency sell trade {trade.pair}: {exception}')

     def replace_order(self, order: Dict, order_obj: Optional[Order], trade: Trade) -> None:
         """
@@ -1339,7 +1315,7 @@ class FreqtradeBot(LoggingMixin):
                     default_retval=order_obj.price)(
                 trade=trade, order=order_obj, pair=trade.pair,
                 current_time=datetime.now(timezone.utc), proposed_rate=proposed_rate,
-                current_order_rate=order_obj.safe_price, entry_tag=trade.enter_tag,
+                current_order_rate=order_obj.price, entry_tag=trade.enter_tag,
                 side=trade.entry_side)

             replacing = True
@@ -1355,8 +1331,7 @@ class FreqtradeBot(LoggingMixin):
                 # place new order only if new price is supplied
                 self.execute_entry(
                     pair=trade.pair,
-                    stake_amount=(
-                        order_obj.safe_remaining * order_obj.safe_price / trade.leverage),
+                    stake_amount=(order_obj.remaining * order_obj.price / trade.leverage),
                     price=adjusted_entry_price,
                     trade=trade,
                     is_short=trade.is_short,
@@ -1370,8 +1345,6 @@ class FreqtradeBot(LoggingMixin):
         """

         for trade in Trade.get_open_order_trades():
-            if not trade.open_order_id:
-                continue
             try:
                 order = self.exchange.fetch_order(trade.open_order_id, trade.pair)
             except (ExchangeError):
@@ -1396,9 +1369,6 @@ class FreqtradeBot(LoggingMixin):
         """
         was_trade_fully_canceled = False
         side = trade.entry_side.capitalize()
-        if not trade.open_order_id:
-            logger.warning(f"No open order for {trade}.")
-            return False

         # Cancelled orders may have the status of 'canceled' or 'closed'
         if order['status'] not in constants.NON_OPEN_EXCHANGE_STATES:
@@ -1485,34 +1455,35 @@ class FreqtradeBot(LoggingMixin):
                 return False

             try:
-                order = self.exchange.cancel_order_with_result(
-                    order['id'], trade.pair, trade.amount)
+                co = self.exchange.cancel_order_with_result(trade.open_order_id, trade.pair,
+                                                            trade.amount)
             except InvalidOrderException:
                 logger.exception(
                     f"Could not cancel {trade.exit_side} order {trade.open_order_id}")
                 return False
+            trade.close_rate = None
+            trade.close_rate_requested = None
+            trade.close_profit = None
+            trade.close_profit_abs = None
             # Set exit_reason for fill message
             exit_reason_prev = trade.exit_reason
             trade.exit_reason = trade.exit_reason + f", {reason}" if trade.exit_reason else reason
+            self.update_trade_state(trade, trade.open_order_id, co)
             # Order might be filled above in odd timing issues.
-            if order.get('status') in ('canceled', 'cancelled'):
+            if co.get('status') in ('canceled', 'cancelled'):
                 trade.exit_reason = None
                 trade.open_order_id = None
             else:
                 trade.exit_reason = exit_reason_prev

+            logger.info(f'{trade.exit_side.capitalize()} order {reason} for {trade}.')
             cancelled = True
         else:
             reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE']
-            trade.exit_reason = None
+            logger.info(f'{trade.exit_side.capitalize()} order {reason} for {trade}.')
+            self.update_trade_state(trade, trade.open_order_id, order)
             trade.open_order_id = None
-
-            self.update_trade_state(trade, trade.open_order_id, order)
-
-            logger.info(f'{trade.exit_side.capitalize()} order {reason} for {trade}.')
-            trade.close_rate = None
-            trade.close_rate_requested = None

             self._notify_exit_cancel(
                 trade,
                 order_type=self.strategy.order_types['exit'],
@@ -1669,13 +1640,13 @@ class FreqtradeBot(LoggingMixin):
             profit = trade.calc_profit(rate=order_rate, amount=amount, open_rate=trade.open_rate)
             profit_ratio = trade.calc_profit_ratio(order_rate, amount, trade.open_rate)
         else:
-            order_rate = trade.safe_close_rate
+            order_rate = trade.close_rate if trade.close_rate else trade.close_rate_requested
             profit = trade.calc_profit(rate=order_rate) + (0.0 if fill else trade.realized_profit)
             profit_ratio = trade.calc_profit_ratio(order_rate)
             amount = trade.amount
         gain = "profit" if profit_ratio > 0 else "loss"

-        msg: RPCSellMsg = {
+        msg = {
             'type': (RPCMessageType.EXIT_FILL if fill
                      else RPCMessageType.EXIT),
             'trade_id': trade.id,
@@ -1701,7 +1672,6 @@ class FreqtradeBot(LoggingMixin):
             'close_date': trade.close_date or datetime.utcnow(),
             'stake_amount': trade.stake_amount,
             'stake_currency': self.config['stake_currency'],
-            'base_currency': self.exchange.get_pair_base_currency(trade.pair),
             'fiat_currency': self.config.get('fiat_display_currency'),
             'sub_trade': sub_trade,
             'cumulative_profit': trade.realized_profit,
@ -1725,14 +1695,14 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
raise DependencyException(
|
raise DependencyException(
|
||||||
f"Order_obj not found for {order_id}. This should not have happened.")
|
f"Order_obj not found for {order_id}. This should not have happened.")
|
||||||
|
|
||||||
profit_rate: float = trade.safe_close_rate
|
profit_rate = trade.close_rate if trade.close_rate else trade.close_rate_requested
|
||||||
profit_trade = trade.calc_profit(rate=profit_rate)
|
profit_trade = trade.calc_profit(rate=profit_rate)
|
||||||
current_rate = self.exchange.get_rate(
|
current_rate = self.exchange.get_rate(
|
||||||
trade.pair, side='exit', is_short=trade.is_short, refresh=False)
|
trade.pair, side='exit', is_short=trade.is_short, refresh=False)
|
||||||
profit_ratio = trade.calc_profit_ratio(profit_rate)
|
profit_ratio = trade.calc_profit_ratio(profit_rate)
|
||||||
gain = "profit" if profit_ratio > 0 else "loss"
|
gain = "profit" if profit_ratio > 0 else "loss"
|
||||||
|
|
||||||
msg: RPCSellCancelMsg = {
|
msg = {
|
||||||
'type': RPCMessageType.EXIT_CANCEL,
|
'type': RPCMessageType.EXIT_CANCEL,
|
||||||
'trade_id': trade.id,
|
'trade_id': trade.id,
|
||||||
'exchange': trade.exchange.capitalize(),
|
'exchange': trade.exchange.capitalize(),
|
||||||
@ -1754,7 +1724,6 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
'open_date': trade.open_date,
|
'open_date': trade.open_date,
|
||||||
'close_date': trade.close_date or datetime.now(timezone.utc),
|
'close_date': trade.close_date or datetime.now(timezone.utc),
|
||||||
'stake_currency': self.config['stake_currency'],
|
'stake_currency': self.config['stake_currency'],
|
||||||
'base_currency': self.exchange.get_pair_base_currency(trade.pair),
|
|
||||||
'fiat_currency': self.config.get('fiat_display_currency', None),
|
'fiat_currency': self.config.get('fiat_display_currency', None),
|
||||||
'reason': reason,
|
'reason': reason,
|
||||||
'sub_trade': sub_trade,
|
'sub_trade': sub_trade,
|
||||||
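The develop side replaces the inline `close_rate if close_rate else close_rate_requested` fallback with a `safe_close_rate` property on the trade model. A toy sketch of the idea; the exact property body (including the final 0.0 fallback) is our assumption, not a quote of freqtrade's model:

from typing import Optional

class TradeSketch:
    close_rate: Optional[float] = None
    close_rate_requested: Optional[float] = None

    @property
    def safe_close_rate(self) -> float:
        # Prefer the realized close rate, fall back to the requested one,
        # and never hand None to callers doing arithmetic.
        return self.close_rate or self.close_rate_requested or 0.0

t = TradeSketch()
t.close_rate_requested = 1.23
assert t.safe_close_rate == 1.23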
@@ -1769,8 +1738,7 @@ class FreqtradeBot(LoggingMixin):
     #

     def update_trade_state(
-            self, trade: Trade, order_id: Optional[str],
-            action_order: Optional[Dict[str, Any]] = None,
+            self, trade: Trade, order_id: str, action_order: Optional[Dict[str, Any]] = None,
             stoploss_order: bool = False, send_msg: bool = True) -> bool:
         """
         Checks trades with open orders and updates the amount if necessary
@@ -1786,11 +1754,11 @@ class FreqtradeBot(LoggingMixin):
             return False

         # Update trade with order values
-        if not stoploss_order:
-            logger.info(f'Found open order for {trade}')
+        logger.info(f'Found open order for {trade}')
         try:
-            order = action_order or self.exchange.fetch_order_or_stoploss_order(
-                order_id, trade.pair, stoploss_order)
+            order = action_order or self.exchange.fetch_order_or_stoploss_order(order_id,
+                                                                                trade.pair,
+                                                                                stoploss_order)
         except InvalidOrderException as exception:
             logger.warning('Unable to fetch order %s: %s', order_id, exception)
             return False
@@ -1819,7 +1787,7 @@ class FreqtradeBot(LoggingMixin):
             # TODO: should shorting/leverage be supported by Edge,
             # then this will need to be fixed.
             trade.adjust_stop_loss(trade.open_rate, self.strategy.stoploss, initial=True)
-        if order.get('side') == trade.entry_side or (trade.amount > 0 and trade.is_open):
+        if order.get('side') == trade.entry_side or trade.amount > 0:
             # Must also run for partial exits
             # TODO: Margin will need to use interest_rate as well.
             # interest_rate = self.exchange.get_interest_rate()
@@ -1855,27 +1823,21 @@ class FreqtradeBot(LoggingMixin):
                 self.handle_protections(trade.pair, trade.trade_direction)
         elif send_msg and not trade.open_order_id and not stoploss_order:
             # Enter fill
-            self._notify_enter(trade, order, order.order_type, fill=True, sub_trade=sub_trade)
+            self._notify_enter(trade, order, fill=True, sub_trade=sub_trade)

     def handle_protections(self, pair: str, side: LongShort) -> None:
         # Lock pair for one candle to prevent immediate rebuys
         self.strategy.lock_pair(pair, datetime.now(timezone.utc), reason='Auto lock')
         prot_trig = self.protections.stop_per_pair(pair, side=side)
         if prot_trig:
-            msg: RPCProtectionMsg = {
-                'type': RPCMessageType.PROTECTION_TRIGGER,
-                'base_currency': self.exchange.get_pair_base_currency(prot_trig.pair),
-                **prot_trig.to_json()  # type: ignore
-            }
+            msg = {'type': RPCMessageType.PROTECTION_TRIGGER, }
+            msg.update(prot_trig.to_json())
             self.rpc.send_msg(msg)

         prot_trig_glb = self.protections.global_stop(side=side)
         if prot_trig_glb:
-            msg = {
-                'type': RPCMessageType.PROTECTION_TRIGGER_GLOBAL,
-                'base_currency': self.exchange.get_pair_base_currency(prot_trig_glb.pair),
-                **prot_trig_glb.to_json()  # type: ignore
-            }
+            msg = {'type': RPCMessageType.PROTECTION_TRIGGER_GLOBAL, }
+            msg.update(prot_trig_glb.to_json())
             self.rpc.send_msg(msg)

     def apply_fee_conditional(self, trade: Trade, trade_base_currency: str,
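The protection messages also change shape: develop builds one dict literal with `**` unpacking instead of building then calling `dict.update()`. Both produce the same mapping; the literal form additionally lets a TypedDict such as `RPCProtectionMsg` check the whole message at once. A self-contained sketch (the stub function and its keys are illustrative only):

from typing import Any, Dict

def to_json_stub() -> Dict[str, Any]:
    # Stand-in for PairLock.to_json(); keys are made up for the example.
    return {"pair": "ETH/USDT", "lock_end_time": "2023-01-01 00:00:00"}

msg_old: Dict[str, Any] = {"type": "protection_trigger"}
msg_old.update(to_json_stub())  # mutate after building

msg_new: Dict[str, Any] = {"type": "protection_trigger", **to_json_stub()}

assert msg_old == msg_new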
@@ -6,7 +6,8 @@ import logging
 import re
 from datetime import datetime
 from pathlib import Path
-from typing import Any, Dict, Iterator, List, Mapping, Optional, TextIO, Union
+from typing import Any, Dict, Iterator, List, Mapping, Optional, Union
+from typing.io import IO
 from urllib.parse import urlparse

 import orjson
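The `from typing.io import IO` spelling (the `+` side here) relies on a shim that has been deprecated since Python 3.8 and was removed in Python 3.12; the develop-side `Union[gzip.GzipFile, TextIO]` annotation is both more precise and portable. Equivalent imports without the shim:

import gzip
from typing import IO, TextIO, Union

FileLike = Union[gzip.GzipFile, TextIO]  # the develop-side json_load signature
GenericFile = IO[str]                    # drop-in replacement for typing.io.IO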
@@ -80,7 +81,7 @@ def file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool =
     else:
         if log:
             logger.info(f'dumping json to "{filename}"')
-        with filename.open('w') as fp:
+        with open(filename, 'w') as fp:
             rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)

     logger.debug(f'done json to "{filename}"')
@@ -97,12 +98,12 @@ def file_dump_joblib(filename: Path, data: Any, log: bool = True) -> None:

     if log:
         logger.info(f'dumping joblib to "{filename}"')
-    with filename.open('wb') as fp:
+    with open(filename, 'wb') as fp:
         joblib.dump(data, fp)
     logger.debug(f'done joblib dump to "{filename}"')


-def json_load(datafile: Union[gzip.GzipFile, TextIO]) -> Any:
+def json_load(datafile: IO) -> Any:
     """
     load data with rapidjson
     Use this to have a consistent experience,
@@ -111,7 +112,7 @@ def json_load(datafile: Union[gzip.GzipFile, TextIO]) -> Any:
     return rapidjson.load(datafile, number_mode=rapidjson.NM_NATIVE)


-def file_load_json(file: Path):
+def file_load_json(file):

     if file.suffix != ".gz":
         gzipfile = file.with_suffix(file.suffix + '.gz')
@@ -124,7 +125,7 @@ def file_load_json(file):
         pairdata = json_load(datafile)
     elif file.is_file():
         logger.debug(f"Loading historical data from file {file}")
-        with file.open() as datafile:
+        with open(file) as datafile:
             pairdata = json_load(datafile)
     else:
         return None
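Functionally, `filename.open('w')` and `open(filename, 'w')` are interchangeable: `Path.open()` delegates to the builtin, and the builtin `open()` accepts any `os.PathLike`. The develop side simply keeps the pathlib style consistent once the argument is already a `Path`. A quick sketch:

from pathlib import Path

filename = Path("result.json")  # illustrative path

# Equivalent ways to write the same file:
with filename.open("w") as fp:
    fp.write("{}")
with open(filename, "w") as fp:
    fp.write("{}")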
@@ -29,7 +29,7 @@ def get_strategy_run_id(strategy) -> str:
     # Include _ft_params_from_file - so changing parameter files cause cache eviction
     digest.update(rapidjson.dumps(
         strategy._ft_params_from_file, default=str, number_mode=rapidjson.NM_NAN).encode('utf-8'))
-    with Path(strategy.__file__).open('rb') as fp:
+    with open(strategy.__file__, 'rb') as fp:
         digest.update(fp.read())
     return digest.hexdigest().lower()
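For context, `get_strategy_run_id` derives a cache key by hashing the strategy's parameters and source bytes, so touching either evicts cached backtest results. A simplified sketch of that shape (the hash choice and other details here are an assumption, not the verbatim function):

import hashlib
from pathlib import Path

import rapidjson

def strategy_digest(strategy_file: Path, params: dict) -> str:
    digest = hashlib.sha1()
    # NM_NAN tolerates NaN values that can appear in hyperopted parameters.
    digest.update(rapidjson.dumps(params, default=str,
                                  number_mode=rapidjson.NM_NAN).encode('utf-8'))
    digest.update(strategy_file.read_bytes())
    return digest.hexdigest().lower()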
@@ -93,7 +93,7 @@ class Backtesting:
         if self.config.get('strategy_list'):
             if self.config.get('freqai', {}).get('enabled', False):
                 logger.warning("Using --strategy-list with FreqAI REQUIRES all strategies "
-                               "to have identical feature_engineering_* functions.")
+                               "to have identical populate_any_indicators.")
             for strat in list(self.config['strategy_list']):
                 stratconf = deepcopy(self.config)
                 stratconf['strategy'] = strat
@@ -203,10 +203,9 @@ class Backtesting:
         # since a "perfect" stoploss-exit is assumed anyway
         # And the regular "stoploss" function would not apply to that case
         self.strategy.order_types['stoploss_on_exchange'] = False
-        # Update can_short flag
-        self._can_short = self.trading_mode != TradingMode.SPOT and strategy.can_short

         self.strategy.ft_bot_start()
+        strategy_safe_wrapper(self.strategy.bot_loop_start, supress_error=True)()

     def _load_protections(self, strategy: IStrategy):
         if self.config.get('enable_protections', False):
@@ -441,8 +440,11 @@ class Backtesting:
                     side_1 * abs(self.strategy.trailing_stop_positive / leverage)))
             else:
                 # Worst case: price ticks tiny bit above open and dives down.
-                stop_rate = row[OPEN_IDX] * (1 - side_1 * abs(
-                    (trade.stop_loss_pct or 0.0) / leverage))
+                stop_rate = row[OPEN_IDX] * (1 - side_1 * abs(trade.stop_loss_pct / leverage))
+                if is_short:
+                    assert stop_rate > row[LOW_IDX]
+                else:
+                    assert stop_rate < row[HIGH_IDX]

             # Limit lower-end to candle low to avoid exits below the low.
             # This still remains "worst case" - but "worst realistic case".
@@ -470,7 +472,7 @@ class Backtesting:
             # - (Expected abs profit - open_rate - open_fee) / (fee_close -1)
             roi_rate = trade.open_rate * roi / leverage
             open_fee_rate = side_1 * trade.open_rate * (1 + side_1 * trade.fee_open)
-            close_rate = -(roi_rate + open_fee_rate) / ((trade.fee_close or 0.0) - side_1 * 1)
+            close_rate = -(roi_rate + open_fee_rate) / (trade.fee_close - side_1 * 1)
             if is_short:
                 is_new_roi = row[OPEN_IDX] < close_rate
             else:
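The `close_rate` expression above inverts the relative-profit equation; for a long trade (`side_1 == 1`, `leverage == 1`) it reduces to `(open_rate * (1 + fee_open) + roi_rate) / (1 - fee_close)`. A quick numeric check of that claim:

# Verify the ROI close_rate formula for a long trade.
open_rate, fee_open, fee_close = 100.0, 0.001, 0.001
roi, leverage, side_1 = 0.05, 1.0, 1

roi_rate = open_rate * roi / leverage
open_fee_rate = side_1 * open_rate * (1 + side_1 * fee_open)
close_rate = -(roi_rate + open_fee_rate) / (fee_close - side_1 * 1)

# Relative profit at that close rate hits the ROI target exactly:
profit = (close_rate * (1 - fee_close) - open_rate * (1 + fee_open)) / open_rate
assert abs(profit - roi) < 1e-9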
@@ -523,7 +525,7 @@ class Backtesting:
         max_stake = self.exchange.get_max_pair_stake_amount(trade.pair, current_rate)
         stake_available = self.wallets.get_available_stake_amount()
         stake_amount = strategy_safe_wrapper(self.strategy.adjust_trade_position,
-                                             default_retval=None, supress_error=True)(
+                                             default_retval=None)(
             trade=trade,  # type: ignore[arg-type]
             current_time=current_date, current_rate=current_rate,
             current_profit=current_profit, min_stake=min_stake,
@@ -561,7 +563,7 @@ class Backtesting:
             pos_trade = self._get_exit_for_signal(trade, row, exit_, amount)
             if pos_trade is not None:
                 order = pos_trade.orders[-1]
-                if self._get_order_filled(order.ft_price, row):
+                if self._get_order_filled(order.price, row):
                     order.close_bt_order(current_date, trade)
                     trade.recalc_trade_from_orders()
                     self.wallets.update()
@@ -662,7 +664,6 @@ class Backtesting:
             side=trade.exit_side,
             order_type=order_type,
             status="open",
-            ft_price=close_rate,
             price=close_rate,
             average=close_rate,
             amount=amount,
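The recurring `order.price` → `order.ft_price` swap is develop adding a second price field on `Order`: the rate the bot placed the order at, kept stable even after exchange data overwrites `price` (for instance with an average fill price), so backtest fill-matching always compares against the placed rate. A toy illustration (the dataclass is ours, not freqtrade's Order model):

from dataclasses import dataclass

@dataclass
class OrderSketch:
    ft_price: float  # rate the bot placed the order at
    price: float     # rate as last updated from (simulated) exchange data

order = OrderSketch(ft_price=1.00, price=1.00)
order.price = 0.998            # exchange reports a better average fill
assert order.ft_price == 1.00  # fill-matching still sees the placed rate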
@@ -741,12 +742,12 @@ class Backtesting:
             proposed_leverage=1.0,
             max_leverage=max_leverage,
             side=direction, entry_tag=entry_tag,
-        ) if self.trading_mode != TradingMode.SPOT else 1.0
+        ) if self._can_short else 1.0
         # Cap leverage between 1.0 and max_leverage.
         leverage = min(max(leverage, 1.0), max_leverage)

         min_stake_amount = self.exchange.get_min_pair_stake_amount(
-            pair, propose_rate, -0.05 if not pos_adjust else 0.0, leverage=leverage) or 0
+            pair, propose_rate, -0.05, leverage=leverage) or 0
         max_stake_amount = self.exchange.get_max_pair_stake_amount(
             pair, propose_rate, leverage=leverage)
         stake_available = self.wallets.get_available_stake_amount()
@@ -886,7 +887,6 @@ class Backtesting:
             order_date=current_time,
             order_filled_date=current_time,
             order_update_date=current_time,
-            ft_price=propose_rate,
             price=propose_rate,
             average=propose_rate,
             amount=amount,
@@ -895,7 +895,7 @@ class Backtesting:
             cost=stake_amount + trade.fee_open,
         )
         trade.orders.append(order)
-        if pos_adjust and self._get_order_filled(order.ft_price, row):
+        if pos_adjust and self._get_order_filled(order.price, row):
             order.close_bt_order(current_time, trade)
         else:
             trade.open_order_id = str(self.order_id_counter)
@@ -1008,15 +1008,15 @@ class Backtesting:
             # only check on new candles for open entry orders
             if order.side == trade.entry_side and current_time > order.order_date_utc:
                 requested_rate = strategy_safe_wrapper(self.strategy.adjust_entry_price,
-                                                       default_retval=order.ft_price)(
+                                                       default_retval=order.price)(
                     trade=trade,  # type: ignore[arg-type]
                     order=order, pair=trade.pair, current_time=current_time,
-                    proposed_rate=row[OPEN_IDX], current_order_rate=order.ft_price,
+                    proposed_rate=row[OPEN_IDX], current_order_rate=order.price,
                     entry_tag=trade.enter_tag, side=trade.trade_direction
                 )  # default value is current order price

                 # cancel existing order whenever a new rate is requested (or None)
-                if requested_rate == order.ft_price:
+                if requested_rate == order.price:
                     # assumption: there can't be multiple open entry orders at any given time
                     return False
                 else:
@@ -1028,12 +1028,8 @@ class Backtesting:
                     if requested_rate:
                         self._enter_trade(pair=trade.pair, row=row, trade=trade,
                                           requested_rate=requested_rate,
-                                          requested_stake=(
-                                              order.safe_remaining * order.ft_price / trade.leverage),
+                                          requested_stake=(order.remaining * order.price / trade.leverage),
                                           direction='short' if trade.is_short else 'long')
-                        # Delete trade if no successful entries happened (if placing the new order failed)
-                        if trade.open_order_id is None and trade.nr_of_successful_entries == 0:
-                            return True
                         self.replaced_entry_orders += 1
                     else:
                         # assumption: there can't be multiple open entry orders at any given time
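Similarly, `order.remaining` → `order.safe_remaining` guards against `remaining` being reported as None (as it can be on freshly created backtest orders). A sketch of what such a property plausibly does, modeled on the other `safe_*` helpers; the exact body is our assumption:

from typing import Optional

class OrderStub:
    amount: float = 2.0
    filled: Optional[float] = 0.5
    remaining: Optional[float] = None

    @property
    def safe_remaining(self) -> float:
        # Fall back to amount - filled when no 'remaining' was reported.
        if self.remaining is not None:
            return self.remaining
        return self.amount - (self.filled or 0.0)

assert OrderStub().safe_remaining == 1.5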
@@ -1099,7 +1095,7 @@ class Backtesting:
         for trade in list(LocalTrade.bt_trades_open_pp[pair]):
             # 3. Process entry orders.
             order = trade.select_order(trade.entry_side, is_open=True)
-            if order and self._get_order_filled(order.ft_price, row):
+            if order and self._get_order_filled(order.price, row):
                 order.close_bt_order(current_time, trade)
                 trade.open_order_id = None
                 self.wallets.update()
@@ -1110,7 +1106,7 @@ class Backtesting:

             # 5. Process exit orders.
             order = trade.select_order(trade.exit_side, is_open=True)
-            if order and self._get_order_filled(order.ft_price, row):
+            if order and self._get_order_filled(order.price, row):
                 order.close_bt_order(current_time, trade)
                 trade.open_order_id = None
                 sub_trade = order.safe_amount_after_fee != trade.amount
@@ -1119,7 +1115,7 @@ class Backtesting:
                 trade.recalc_trade_from_orders()
             else:
                 trade.close_date = current_time
-                trade.close(order.ft_price, show_msg=False)
+                trade.close(order.price, show_msg=False)

                 # logger.debug(f"{pair} - Backtesting exit {trade}")
                 LocalTrade.close_bt_trade(trade)
@@ -1159,8 +1155,6 @@ class Backtesting:
         while current_time <= end_date:
             open_trade_count_start = LocalTrade.bt_open_open_trade_count
             self.check_abort()
-            strategy_safe_wrapper(self.strategy.bot_loop_start, supress_error=True)(
-                current_time=current_time)
             for i, pair in enumerate(data):
                 row_index = indexes[pair]
                 row = self.validate_row(data, pair, row_index, current_time)
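On the develop side, `bot_loop_start` runs once per simulated candle with `current_time`; the other branch calls it once at strategy setup (seen in an earlier hunk). In both, `strategy_safe_wrapper` means a crashing user callback logs instead of aborting the run. A simplified sketch of that wrapper idea (freqtrade's real wrapper lives elsewhere in the codebase and keeps the `supress_error` spelling):

import logging
from functools import wraps

logger = logging.getLogger(__name__)

def safe_wrapper(fn, default=None, supress_error=False):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception:
            if not supress_error:
                raise
            # Log the traceback, then fall back to the default return value.
            logger.exception("Error in strategy callback %s", fn.__name__)
            return default
    return wrapper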
@@ -1,3 +1,4 @@
+import io
 import logging
 from copy import deepcopy
 from datetime import datetime, timezone
@@ -23,8 +24,6 @@ logger = logging.getLogger(__name__)

 NON_OPT_PARAM_APPENDIX = " # value loaded from strategy"

-HYPER_PARAMS_FILE_FORMAT = rapidjson.NM_NATIVE | rapidjson.NM_NAN
-

 def hyperopt_serializer(x):
     if isinstance(x, np.integer):
@@ -78,18 +77,9 @@ class HyperoptTools():
         with filename.open('w') as f:
             rapidjson.dump(final_params, f, indent=2,
                            default=hyperopt_serializer,
-                           number_mode=HYPER_PARAMS_FILE_FORMAT
+                           number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN
                            )

-    @staticmethod
-    def load_params(filename: Path) -> Dict:
-        """
-        Load parameters from file
-        """
-        with filename.open('r') as f:
-            params = rapidjson.load(f, number_mode=HYPER_PARAMS_FILE_FORMAT)
-        return params
-
     @staticmethod
     def try_export_params(config: Config, strategy_name: str, params: Dict):
         if params.get(FTHYPT_FILEVERSION, 1) >= 2 and not config.get('disableparamexport', False):
@@ -200,7 +190,7 @@ class HyperoptTools():
         for s in ['buy', 'sell', 'protection',
                   'roi', 'stoploss', 'trailing', 'max_open_trades']:
             HyperoptTools._params_update_for_json(result_dict, params, non_optimized, s)
-        print(rapidjson.dumps(result_dict, default=str, number_mode=HYPER_PARAMS_FILE_FORMAT))
+        print(rapidjson.dumps(result_dict, default=str, number_mode=rapidjson.NM_NATIVE))

     else:
         HyperoptTools._params_pretty_print(params, 'buy', "Buy hyperspace params:",
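Develop factors the rapidjson flags into a shared `HYPER_PARAMS_FILE_FORMAT` constant and adds a `load_params` counterpart to the existing dump, so saving and loading agree on native-number and NaN handling. A round-trip sketch of why NM_NAN matters (plain JSON rejects NaN/Inf, which hyperopt results can contain):

from pathlib import Path

import rapidjson

HYPER_PARAMS_FILE_FORMAT = rapidjson.NM_NATIVE | rapidjson.NM_NAN

params = {"stoploss": -0.1, "score": float("nan")}
path = Path("params.json")
path.write_text(rapidjson.dumps(params, number_mode=HYPER_PARAMS_FILE_FORMAT))
loaded = rapidjson.loads(path.read_text(), number_mode=HYPER_PARAMS_FILE_FORMAT)
assert loaded["stoploss"] == -0.1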
@@ -474,8 +464,8 @@ class HyperoptTools():
             return

         try:
-            Path(csv_file).open('w+').close()
-        except OSError:
+            io.open(csv_file, 'w+').close()
+        except IOError:
             logger.error(f"Failed to create CSV file: {csv_file}")
             return
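Both replacements in this hunk are pure modernization: since Python 3.3, `IOError` is an alias of `OSError`, and `io.open` is the very same object as the builtin `open`, so behavior is unchanged either way:

import io

assert IOError is OSError  # alias since Python 3.3
assert io.open is open     # same function object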
@@ -865,11 +865,6 @@ def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency:
     print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
     print(table)

-    table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
-    if isinstance(table, str) and len(table) > 0:
-        print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
-        print(table)
-
     if (results.get('results_per_enter_tag') is not None
             or results.get('results_per_buy_tag') is not None):
         # results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
@@ -889,6 +884,11 @@ def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency:
         print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
         print(table)

+    table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
+    if isinstance(table, str) and len(table) > 0:
+        print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
+        print(table)
+
     for period in backtest_breakdown:
         days_breakdown_stats = generate_periodic_breakdown_stats(
             trade_list=results['trades'], period=period)
@@ -917,11 +917,11 @@ def show_backtest_results(config: Config, backtest_stats: Dict):
                 strategy, results, stake_currency,
                 config.get('backtest_breakdown', []))

-    if len(backtest_stats['strategy']) > 0:
+    if len(backtest_stats['strategy']) > 1:
         # Print Strategy summary table

         table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
-        print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
+        print(f"{results['backtest_start']} -> {results['backtest_end']} |"
               f" Max open trades : {results['max_open_trades']}")
         print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
         print(table)
@@ -1,6 +1,5 @@
 # flake8: noqa: F401

-from freqtrade.persistence.key_value_store import KeyStoreKeys, KeyValueStore
 from freqtrade.persistence.models import init_db
 from freqtrade.persistence.pairlock_middleware import PairLocks
 from freqtrade.persistence.trade_model import LocalTrade, Order, Trade
@@ -1,9 +1,7 @@
-from sqlalchemy.orm import DeclarativeBase, Session, scoped_session
+from typing import Any

+from sqlalchemy.orm import declarative_base


-SessionType = scoped_session[Session]
+_DECL_BASE: Any = declarative_base()


-class ModelBase(DeclarativeBase):
-    pass
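This last hunk is the SQLAlchemy 2.0 migration: the legacy `declarative_base()` factory becomes a `DeclarativeBase` subclass, which mypy can type-check without plugins, and `scoped_session[Session]` becomes usable as a subscripted type alias. A sketch of a model on the new-style base (requires SQLAlchemy >= 2.0; the model itself is illustrative, not freqtrade's):

from sqlalchemy import Integer, String
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

class ModelBase(DeclarativeBase):
    pass

class PairLockSketch(ModelBase):
    __tablename__ = "pairlocks_sketch"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    pair: Mapped[str] = mapped_column(String(25), nullable=False)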
Some files were not shown because too many files have changed in this diff.