From ad424852b819a1d76b4ddefe7614abec7a382d0a Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Sat, 27 Dec 2025 10:19:13 +1100 Subject: [PATCH 1/3] Add the ability to migrate `minio` data to `rustfs` V2 Known issues with multiple versions not always returned in the right order via the RustFS API. --- .env.example | 38 +++++------ .github/workflows/test.yml | 8 +-- README.md | 64 ++++++++++++------- .../qfieldcloud/core/tests/test_status.py | 4 +- docker-app/qfieldcloud/filestorage/models.py | 4 +- docker-app/qfieldcloud/settings.py | 2 +- docker-compose.override.minio.yml | 35 ++++++++++ docker-compose.override.standalone.yml | 52 ++++++++------- 8 files changed, 128 insertions(+), 79 deletions(-) create mode 100644 docker-compose.override.minio.yml diff --git a/.env.example b/.env.example index e97133f67..9083b8092 100644 --- a/.env.example +++ b/.env.example @@ -81,14 +81,14 @@ LETSENCRYPT_STAGING=1 # Used to define storages in QFieldCloud. Note the contents of this variable is a superset of Django's `STORAGES` setting. # NOTE: Note if the `STORAGES` is not available, QFieldCloud will still work with `STORAGE_ACCESS_KEY_ID`, `STORAGE_SECRET_KEY_ID`, `STORAGE_BUCKET_NAME` and `STORAGE_REGION_NAME` from previous QFC versions. # NOTE: The custom property `QFC_IS_LEGACY` is temporary available to allow migration from the old to the new way of handling files. This option will soon be removed, so you are highly encouraged to migrate all the projects to the new way of handling files. -# NOTE: The `endpoint_url` must be a URL reachable from within docker and the host, the default value `172.17.0.1` for `minio` is the docker network `bridge`. On windows/mac, change the value to "http://host.docker.internal:8009". +# NOTE: The `endpoint_url` must be a URL reachable from within docker and the host, the default value `172.17.0.1` for `rustfs` is the docker network `bridge`. On windows/mac, change the value to "http://host.docker.internal:8009". 
# DEFAULT: # { # "default": { # "BACKEND": "qfieldcloud.filestorage.backend.QfcS3Boto3Storage", # "OPTIONS": { -# "access_key": "minioadmin", -# "secret_key": "minioadmin", +# "access_key": "rustfsadmin", +# "secret_key": "rustfsadmin", # "bucket_name": "qfieldcloud-local", # "region_name": "", # "endpoint_url": "http://172.17.0.1:8009" @@ -130,8 +130,8 @@ STORAGES='{ "default": { "BACKEND": "qfieldcloud.filestorage.backend.QfcS3Boto3Storage", "OPTIONS": { - "access_key": "minioadmin", - "secret_key": "minioadmin", + "access_key": "rustfsadmin", + "secret_key": "rustfsadmin", "bucket_name": "qfieldcloud-local", "region_name": "", "endpoint_url": "http://172.17.0.1:8009" @@ -479,28 +479,28 @@ SMTP4DEV_IMAP_PORT=143 # DEFAULT: 17-3.5-alpine POSTGIS_IMAGE_VERSION=17-3.5-alpine -# Local admin username configuration for minio storage in local and standalone instances. -# NOTE: Ignored if `minio` is not used. +# Local admin username configuration for object storage in local and standalone instances. +# NOTE: Ignored if `rustfs` is not used. # NOTE: Needs to be the same as in the `STORAGES` setting in standalone config. -# DEFAULT: MINIO_ROOT_USER=minioadmin -MINIO_ROOT_USER=minioadmin +# DEFAULT: "rustfsadmin" +OBJECT_STORAGE_ROOT_USER="rustfsadmin" -# Local admin password configuration for minio storage in local and standalone instances. -# NOTE: Ignored if `minio` is not used. +# Local admin password configuration for object storage in local and standalone instances. +# NOTE: Ignored if `rustfs` is not used. # NOTE: Needs to be the same as in the `STORAGES` setting in standalone config. -# DEFAULT: MINIO_ROOT_PASSWORD=minioadmin -MINIO_ROOT_PASSWORD=minioadmin +# DEFAULT: "rustfsadmin" +OBJECT_STORAGE_ROOT_PASSWORD="rustfsadmin" -# Public port to the minio API endpoint. It must match the configured port in `STORAGE_ENDPOINT_URL`. -# NOTE: Ignored if `minio` is not used. +# Public port to the object storage API endpoint. 
It must match the configured port in `STORAGE_ENDPOINT_URL`. +# NOTE: Ignored if `rustfs` is not used. # NOTE: Needs to be the same as in the `STORAGES` setting in standalone config. # DEFAULT: 8009 -MINIO_API_PORT=8009 +OBJECT_STORAGE_API_PORT=8009 -# Public port to the minio browser endpoint. -# NOTE: Ignored if `minio` is not used. +# Public port to the object storage browser endpoint. +# NOTE: Ignored if `rustfs` is not used. # DEFAULT: 8010 -MINIO_BROWSER_PORT=8010 +OBJECT_STORAGE_BROWSER_PORT=8010 # Public port to the webdav server. # NOTE: Ignored if `webdav` docker service is not used. diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e6671dded..37b59da92 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -71,8 +71,8 @@ jobs: "legacy_storage": { "BACKEND": "qfieldcloud.filestorage.backend.QfcS3Boto3Storage", "OPTIONS": { - "access_key": "minioadmin", - "secret_key": "minioadmin", + "access_key": "rustfsadmin", + "secret_key": "rustfsadmin", "bucket_name": "qfieldcloud-local-legacy", "region_name": "", "endpoint_url": "http://172.17.0.1:8009" @@ -91,8 +91,8 @@ jobs: "default": { "BACKEND": "qfieldcloud.filestorage.backend.QfcS3Boto3Storage", "OPTIONS": { - "access_key": "minioadmin", - "secret_key": "minioadmin", + "access_key": "rustfsadmin", + "secret_key": "rustfsadmin", "bucket_name": "qfieldcloud-local", "region_name": "", "endpoint_url": "http://172.17.0.1:8009" diff --git a/README.md b/README.md index bed797912..9bae5a9e2 100644 --- a/README.md +++ b/README.md @@ -181,8 +181,8 @@ Create an .env.test file with the following variables that override QFIELDCLOUD_HOST=nginx DJANGO_SETTINGS_MODULE=qfieldcloud.settings STORAGE_ENDPOINT_URL=http://172.17.0.1:8109 - MINIO_API_PORT=8109 - MINIO_BROWSER_PORT=8110 + OBJECT_STORAGE_API_PORT=8109 + OBJECT_STORAGE_BROWSER_PORT=8110 WEB_HTTP_PORT=8101 WEB_HTTPS_PORT=8102 HOST_POSTGRES_PORT=8103 @@ -327,22 +327,31 @@ Code style done with 
[`pre-commit`](https://pre-commit.com): Copy the `.env.example` into `.env` file: - cp .env.example .env - vi .env +``` +cp .env.example .env +vi .env +``` Do not forget to set `DEBUG=0` and to adapt `COMPOSE_FILE` environment variable to not load local development configurations. Run and build the docker containers: - docker compose up -d --build +``` +docker compose up -d --build +``` Run the django database migrations: - docker compose exec app python manage.py migrate +``` +docker compose exec app python manage.py migrate +``` Collect the static files: - docker compose exec app python manage.py collectstatic +``` +docker compose exec app python manage.py collectstatic +``` + ### Using certificate from Let's Encrypt @@ -352,40 +361,47 @@ Note you want to change the `LETSENCRYPT_EMAIL`, `LETSENCRYPT_RSA_KEY_SIZE` and On a server with a public domain, you can get a certificate issued by Let's Encrypt using certbot running the following command: - ./scripts/init_letsencrypt.sh +``` +./scripts/init_letsencrypt.sh +``` The certificates will be renewed automatically. To use this Let's Encrypt certificate within QFieldCloud you just need to uncomment the following lines in your `.env`: - QFIELDCLOUD_TLS_CERT=/etc/letsencrypt/live/${QFIELDCLOUD_HOST}/fullchain.pem - QFIELDCLOUD_TLS_KEY=/etc/letsencrypt/live/${QFIELDCLOUD_HOST}/privkey.pem +``` +QFIELDCLOUD_TLS_CERT=/etc/letsencrypt/live/${QFIELDCLOUD_HOST}/fullchain.pem +QFIELDCLOUD_TLS_KEY=/etc/letsencrypt/live/${QFIELDCLOUD_HOST}/privkey.pem +``` You can also use your own certificates by placing them in `conf/nginx/certs/` and changing `QFIELDCLOUD_TLS_CERT` and `QFIELDCLOUD_TLS_KEY` accordingly. Don't forget to create your Diffie-Hellman parameters. + ### Additional NGINX config You can add additional config to nginx placing files in `conf/nginx/config.d/` ending with `.conf`. They will be included in the main `nginx.conf`. 
+ ## Infrastructure -Based on this example - ### Ports -| service | port | configuration | local | development | production | -|---------------|-------|----------------------|--------------------|--------------------|--------------------| -| nginx http | 80 | WEB_HTTP_PORT | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| nginx https | 443 | WEB_HTTPS_PORT | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| django http | 8011 | DJANGO_DEV_PORT | :white_check_mark: | :x: | :x: | -| postgres | 5433 | HOST_POSTGRES_PORT | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| minio API | 8009 | MINIO_API_PORT | :white_check_mark: | :x: | :x: | -| minio browser | 8010 | MINIO_BROWSER_PORT | :white_check_mark: | :x: | :x: | -| smtp web | 8012 | SMTP4DEV_WEB_PORT | :white_check_mark: | :x: | :x: | -| smtp | 25 | SMTP4DEV_SMTP_PORT | :white_check_mark: | :x: | :x: | -| imap | 143 | SMTP4DEV_IMAP_PORT | :white_check_mark: | :x: | :x: | +Table of some of the relevant ports being exposed and how to configure them. 
+
+| service                | port | configuration               | local              | staging            | production         |
+|------------------------|------|-----------------------------|--------------------|--------------------|--------------------|
+| nginx http             | 80   | WEB_HTTP_PORT               | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| nginx https            | 443  | WEB_HTTPS_PORT              | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| django http            | 8011 | DJANGO_DEV_PORT             | :white_check_mark: | :x:                | :x:                |
+| postgres               | 5433 | HOST_POSTGRES_PORT          | :white_check_mark: | :x:                | :x:                |
+| object storage API     | 8009 | OBJECT_STORAGE_API_PORT     | :white_check_mark: | :x:                | :x:                |
+| object storage browser | 8010 | OBJECT_STORAGE_BROWSER_PORT | :white_check_mark: | :x:                | :x:                |
+| smtp web               | 8012 | SMTP4DEV_WEB_PORT           | :white_check_mark: | :x:                | :x:                |
+| smtp                   | 25   | SMTP4DEV_SMTP_PORT          | :white_check_mark: | :x:                | :x:                |
+| imap                   | 143  | SMTP4DEV_IMAP_PORT          | :white_check_mark: | :x:                | :x:                |
+| webdav                 | 8020 | WEBDAV_PUBLIC_PORT          | :white_check_mark: | :x:                | :x:                |
 
 ### Logs
 
@@ -402,7 +418,7 @@ For great `nginx` logs, use:
 
 ### Storage
 
-You can use either the integrated `minio` object storage, or use an external provider (e.g. S3) with versioning enabled. Check the corresponding `STORAGE_*` environment variables for more info.
+You can use either the integrated `rustfs` object storage, or use an external provider (e.g. S3) with versioning enabled. Check the corresponding `STORAGE_*` environment variables for more info.
 
### Database diff --git a/docker-app/qfieldcloud/core/tests/test_status.py b/docker-app/qfieldcloud/core/tests/test_status.py index e5dbdc5a2..ec579c698 100644 --- a/docker-app/qfieldcloud/core/tests/test_status.py +++ b/docker-app/qfieldcloud/core/tests/test_status.py @@ -69,8 +69,8 @@ def test_status_storage_fails_with_no_storages(self): "default": { "BACKEND": "qfieldcloud.filestorage.backend.QfcS3Boto3Storage", "OPTIONS": { - "access_key": "minioadmin", - "secret_key": "minioadmin", + "access_key": "rustfsadmin", + "secret_key": "rustfsadmin", "bucket_name": "nonexistent-bucket", "region_name": "", "endpoint_url": "http://wrong.url", diff --git a/docker-app/qfieldcloud/filestorage/models.py b/docker-app/qfieldcloud/filestorage/models.py index 7156cdeb6..51e31a092 100644 --- a/docker-app/qfieldcloud/filestorage/models.py +++ b/docker-app/qfieldcloud/filestorage/models.py @@ -90,7 +90,7 @@ class FileType(models.IntegerChoices): # Require at least 1 character filenames MinLengthValidator(1), # NOTE the files on Windows cannot be longer than 260 _chars_ by default, see https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#maximum-path-length-limitation - # NOTE minio limit is 255 _chars_ per filename segment, read https://min.io/docs/minio/linux/operations/concepts/thresholds.html#id1 + # NOTE `minio` limit is 255 _chars_ per filename segment, read https://min.io/docs/minio/linux/operations/concepts/thresholds.html#id1 MaxLengthValidator(settings.STORAGE_FILENAME_MAX_CHAR_LENGTH), # NOTE the keys on S3 cannot be longer than 1024 _bytes_, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html MaxBytesLengthValidator(1024), @@ -344,7 +344,7 @@ class Meta: # TODO We do not `auto_now_add=True` to be able to set this when migrating files from legacy to the regular storage. Switch to `auto_now_add=True` when the legacy storage is no longer supported. 
created_at = models.DateTimeField(default=timezone.now, editable=False) - # The version id from the legacy object storage. On minio it is a UUID, on other providers it might be a random string. + # The version id from the legacy object storage. The version id format is provider dependent, e.g. on S3 it is a random string, on `minio` it is a UUID. legacy_id = models.TextField(max_length=255, editable=False, null=True) @property diff --git a/docker-app/qfieldcloud/settings.py b/docker-app/qfieldcloud/settings.py index ded60fa70..5baf99697 100644 --- a/docker-app/qfieldcloud/settings.py +++ b/docker-app/qfieldcloud/settings.py @@ -304,7 +304,7 @@ # Maximum filename length in characters # NOTE the keys on S3 cannot be longer than 1024 _bytes_, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html # NOTE the files on Windows cannot be longer than 260 _chars_ by default, see https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#maximum-path-length-limitation -# NOTE minio limit is 255 _chars_ per filename segment, read https://min.io/docs/minio/linux/operations/concepts/thresholds.html#id1 +# NOTE `minio` limit is 255 _chars_ per filename segment, read https://min.io/docs/minio/linux/operations/concepts/thresholds.html#id1 STORAGE_FILENAME_MAX_CHAR_LENGTH = 255 # Filename validator regex. 
diff --git a/docker-compose.override.minio.yml b/docker-compose.override.minio.yml new file mode 100644 index 000000000..dcf2d5b2e --- /dev/null +++ b/docker-compose.override.minio.yml @@ -0,0 +1,35 @@ +services: + minio: + image: minio/minio:RELEASE.2025-02-18T16-25-55Z + restart: unless-stopped + volumes: + - minio_data1:/data1 + - minio_data2:/data2 + - minio_data3:/data3 + - minio_data4:/data4 + environment: + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + MINIO_BROWSER_REDIRECT_URL: http://${QFIELDCLOUD_HOST}:${MINIO_BROWSER_PORT} + command: server /data{1...4} --console-address :9001 + healthcheck: + test: [ + "CMD", + "curl", + "-A", + "Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0", + "-f", + "http://localhost:9001/minio/index.html" + ] + interval: 5s + timeout: 20s + retries: 5 + ports: + - ${MINIO_BROWSER_PORT}:9001 + - ${MINIO_API_PORT}:9000 + +volumes: + minio_data1: + minio_data2: + minio_data3: + minio_data4: diff --git a/docker-compose.override.standalone.yml b/docker-compose.override.standalone.yml index 4de133e27..a055129be 100644 --- a/docker-compose.override.standalone.yml +++ b/docker-compose.override.standalone.yml @@ -29,40 +29,41 @@ services: # Specifies the server hostname. Used in auto-generated TLS certificate if enabled. 
- ServerOptions__HostName=smtp4dev - minio: - image: minio/minio:RELEASE.2025-02-18T16-25-55Z + rustfs: + image: rustfs/rustfs:1.0.0-alpha.77 restart: unless-stopped volumes: - - minio_data1:/data1 - - minio_data2:/data2 - - minio_data3:/data3 - - minio_data4:/data4 + - rustfs_data:/data environment: - MINIO_ROOT_USER: ${MINIO_ROOT_USER} - MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} - MINIO_BROWSER_REDIRECT_URL: http://${QFIELDCLOUD_HOST}:${MINIO_BROWSER_PORT} - command: server /data{1...4} --console-address :9001 + - RUSTFS_ACCESS_KEY=${OBJECT_STORAGE_ROOT_USER} + - RUSTFS_SECRET_KEY=${OBJECT_STORAGE_ROOT_PASSWORD} + - RUSTFS_CONSOLE_ENABLE=true + - RUSTFS_ADDRESS=0.0.0.0:9000 + - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001 + - RUSTFS_EXTERNAL_ADDRESS=:${OBJECT_STORAGE_API_PORT} + ports: + # S3 API port + - "${OBJECT_STORAGE_API_PORT}:9000" + # Console port + - "${OBJECT_STORAGE_BROWSER_PORT}:9001" healthcheck: - test: [ + test: + [ "CMD", - "curl", - "-A", - "Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0", - "-f", - "http://localhost:9001/minio/index.html" + "sh", "-c", + "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" ] - interval: 5s - timeout: 20s - retries: 5 - ports: - - ${MINIO_BROWSER_PORT}:9001 - - ${MINIO_API_PORT}:9000 + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + createbuckets: build: context: ./docker-createbuckets depends_on: - minio: + rustfs: condition: service_healthy environment: STORAGES: ${STORAGES} @@ -83,8 +84,5 @@ services: volumes: postgres_data: smtp4dev_data: - minio_data1: - minio_data2: - minio_data3: - minio_data4: + rustfs_data: webdav_data: From 5bbecad149ca9cd7380b92cafef237cfb54e70dc Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Thu, 29 Jan 2026 08:47:12 +1300 Subject: [PATCH 2/3] chore: change the code block definition syntax from indentation to tripple ticks in README --- README.md | 202 ++++++++++++++++++++++++++++++++---------------------- 1 
file changed, 121 insertions(+), 81 deletions(-) diff --git a/README.md b/README.md index 9bae5a9e2..beabc2138 100644 --- a/README.md +++ b/README.md @@ -34,11 +34,15 @@ For self-hosted issues, please use the GitHub issues at https://github.com/openg Clone the repository and all its submodules: - git clone --recurse-submodules git@github.com:opengisch/QFieldCloud.git +```shell +git clone --recurse-submodules git@github.com:opengisch/QFieldCloud.git +``` To fetch upstream development, don't forget to update the submodules too: - git pull --recurse-submodules && git submodule update --recursive +```shell +git pull --recurse-submodules && git submodule update --recursive +``` ### Launch a local instance @@ -112,30 +116,35 @@ It is stored in the `postgres_data` volume and managed via the `db` container. One can connect to the database via running the `psql` command within the `db` container: - docker compose exec -it db psql -U qfieldcloud_db_admin -d qfieldcloud_db +```shell +docker compose exec -it db psql -U qfieldcloud_db_admin -d qfieldcloud_db +``` Or by creating `~/.pg_service.conf` in their user home directory and appending: - [localhost.qfield.cloud] - host=localhost - dbname=qfieldcloud_db - user=qfieldcloud_db_admin - port=5433 - password=3shJDd2r7Twwkehb - sslmode=disable - - [test.localhost.qfield.cloud] - host=localhost - dbname=test_qfieldcloud_db - user=qfieldcloud_db_admin - port=5433 - password=3shJDd2r7Twwkehb - sslmode=disable +```shell +[localhost.qfield.cloud] +host=localhost +dbname=qfieldcloud_db +user=qfieldcloud_db_admin +port=5433 +password=3shJDd2r7Twwkehb +sslmode=disable + +[test.localhost.qfield.cloud] +host=localhost +dbname=test_qfieldcloud_db +user=qfieldcloud_db_admin +port=5433 +password=3shJDd2r7Twwkehb +sslmode=disable +``` And then connecting to the database via: - psql 'service=localhost.qfield.cloud' - +```shell +psql 'service=localhost.qfield.cloud' +``` ### Dependencies @@ -143,33 +152,45 @@ QFieldCloud uses 
[`pip-compile`](https://pypi.org/project/pip-tools/) to manage All dependencies are listed in `requirements*.in` files. When a `pip` a dependency is changed, the developer should produce the new `requirements*.txt` files. - docker compose run --rm pipcompile +```shell +docker compose run --rm pipcompile +``` Alternatively, one can create only a `requirements.txt` file for a single `requirements.in`: +```shell docker compose run --rm pipcompile pip-compile --no-strip-extras -o requirements/requirements_worker_wrapper.txt requirements/requirements_worker_wrapper.in +``` ### Tests Rebuild the docker compose stack with the `docker-compose.override.test.yml` file added to the `COMPOSE_FILE` environment variable: - export COMPOSE_FILE=docker-compose.yml:docker-compose.override.standalone.yml:docker-compose.override.test.yml - # (Re-)build the app service to install necessary test utilities (requirements_test.txt) - docker compose up -d --build - docker compose run app python manage.py migrate - docker compose run app python manage.py collectstatic --noinput +```shell +export COMPOSE_FILE=docker-compose.yml:docker-compose.override.standalone.yml:docker-compose.override.test.yml +# (Re-)build the app service to install necessary test utilities (requirements_test.txt) +docker compose up -d --build +docker compose run app python manage.py migrate +docker compose run app python manage.py collectstatic --noinput +``` You can then run all the unit and functional tests: - docker compose run app python manage.py test --keepdb +```shell +docker compose run app python manage.py test --keepdb +``` To run only a test module (e.g. 
`test_permission.py`): - docker compose run app python manage.py test --keepdb qfieldcloud.core.tests.test_permission +```shell +docker compose run app python manage.py test --keepdb qfieldcloud.core.tests.test_permission +``` To run a specific test: - docker compose run app python manage.py test --keepdb qfieldcloud.core.tests.test_permission.QfcTestCase.test_collaborator_project_takeover +```shell +docker compose run app python manage.py test --keepdb qfieldcloud.core.tests.test_permission.QfcTestCase.test_collaborator_project_takeover +```
@@ -177,36 +198,42 @@ Instructions to have a test instance running in parallel to a dev instance Create an .env.test file with the following variables that override the ones in .env: - ENVIRONMENT=test - QFIELDCLOUD_HOST=nginx - DJANGO_SETTINGS_MODULE=qfieldcloud.settings - STORAGE_ENDPOINT_URL=http://172.17.0.1:8109 - OBJECT_STORAGE_API_PORT=8109 - OBJECT_STORAGE_BROWSER_PORT=8110 - WEB_HTTP_PORT=8101 - WEB_HTTPS_PORT=8102 - HOST_POSTGRES_PORT=8103 - QFIELDCLOUD_DEFAULT_NETWORK=qfieldcloud_test_default - QFIELDCLOUD_SUBSCRIPTION_MODEL=subscription.Subscription - DJANGO_DEV_PORT=8111 - SMTP4DEV_WEB_PORT=8112 - SMTP4DEV_SMTP_PORT=8125 - SMTP4DEV_IMAP_PORT=8143 - COMPOSE_PROJECT_NAME=qfieldcloud_test - COMPOSE_FILE=docker-compose.yml:docker-compose.override.standalone.yml:docker-compose.override.test.yml - DEBUG_APP_DEBUGPY_PORT=5781 - DEBUG_WORKER_WRAPPER_DEBUGPY_PORT=5780 - DEMGEN_PORT=8201 +```shell +ENVIRONMENT=test +QFIELDCLOUD_HOST=nginx +DJANGO_SETTINGS_MODULE=qfieldcloud.settings +STORAGE_ENDPOINT_URL=http://172.17.0.1:8109 +OBJECT_STORAGE_API_PORT=8109 +OBJECT_STORAGE_BROWSER_PORT=8110 +WEB_HTTP_PORT=8101 +WEB_HTTPS_PORT=8102 +HOST_POSTGRES_PORT=8103 +QFIELDCLOUD_DEFAULT_NETWORK=qfieldcloud_test_default +QFIELDCLOUD_SUBSCRIPTION_MODEL=subscription.Subscription +DJANGO_DEV_PORT=8111 +SMTP4DEV_WEB_PORT=8112 +SMTP4DEV_SMTP_PORT=8125 +SMTP4DEV_IMAP_PORT=8143 +COMPOSE_PROJECT_NAME=qfieldcloud_test +COMPOSE_FILE=docker-compose.yml:docker-compose.override.standalone.yml:docker-compose.override.test.yml +DEBUG_APP_DEBUGPY_PORT=5781 +DEBUG_WORKER_WRAPPER_DEBUGPY_PORT=5780 +DEMGEN_PORT=8201 +``` Build the test docker compose stack: - docker compose --env-file .env --env-file .env.test up -d --build - docker compose --env-file .env --env-file .env.test run app python manage.py migrate - docker compose --env-file .env --env-file .env.test run app python manage.py collectstatic --noinput +```shell +docker compose --env-file .env --env-file .env.test up -d --build +docker 
compose --env-file .env --env-file .env.test run app python manage.py migrate +docker compose --env-file .env --env-file .env.test run app python manage.py collectstatic --noinput +``` You can then launch the tests: - docker compose --env-file .env --env-file .env.test run app python manage.py test --keepdb +```shell +docker compose --env-file .env --env-file .env.test run app python manage.py test --keepdb +``` Don't forget to update the `port` value in [`[test.localhost.qfield.cloud]` in your `.pg_service.conf` file](#accessing-the-database). @@ -217,7 +244,7 @@ Don't forget to update the `port` value in [`[test.localhost.qfield.cloud]` in y To get information about the current test coverage, run: -``` +```shell docker compose exec app coverage run manage.py test --keepdb docker compose exec app coverage report ``` @@ -260,7 +287,7 @@ debugpy.wait_for_client() # optional To add breakpoints in vendor modules installed via `pip` or `apt`, you need a copy of their source code on your host machine. The easiest way to achieve that is do actual copy of them: -``` +```shell docker compose cp app:/usr/local/lib/python3.10/site-packages/ docker-app/site-packages ``` @@ -278,41 +305,52 @@ so other programs (e.g. `curl`) can create secure connection to the local QField On Debian/Ubuntu, copy the root certificate to the directory with trusted certificates. 
Note the extension has been changed to `.crt`: - sudo cp ./conf/nginx/certs/rootCA.pem /usr/local/share/ca-certificates/rootCA.crt +```shell +sudo cp ./conf/nginx/certs/rootCA.pem /usr/local/share/ca-certificates/rootCA.crt +``` Trust the newly added certificate: - sudo update-ca-certificates +```shell +sudo update-ca-certificates +``` Connecting with `curl` should return no errors: - curl https://localhost:8002/ +```shell +curl https://localhost:8002/ +``` ### Remove the root certificate If you want to remove or change the root certificate, you need to remove the root certificate file and refresh the list of certificates: - sudo rm /usr/local/share/ca-certificates/rootCA.crt - sudo update-ca-certificates --fresh +```shell +sudo rm /usr/local/share/ca-certificates/rootCA.crt +sudo update-ca-certificates --fresh +``` Now connecting with `curl` should fail with a similar error: - $ curl https://localhost:8002/ +``` +$ curl https://localhost:8002/ - curl: (60) SSL certificate problem: unable to get local issuer certificate - More details here: https://curl.haxx.se/docs/sslcerts.html +curl: (60) SSL certificate problem: unable to get local issuer certificate +More details here: https://curl.haxx.se/docs/sslcerts.html - curl failed to verify the legitimacy of the server and therefore could not - establish a secure connection to it. To learn more about this situation and - how to fix it, please visit the web page mentioned above. +curl failed to verify the legitimacy of the server and therefore could not +establish a secure connection to it. To learn more about this situation and +how to fix it, please visit the web page mentioned above. 
+``` ## Code style Code style done with [`pre-commit`](https://pre-commit.com): - pip install pre-commit - # install pre-commit hook - pre-commit install - +```shell +pip install pre-commit +# install pre-commit hook +pre-commit install +``` ## Deployment @@ -327,7 +365,7 @@ Code style done with [`pre-commit`](https://pre-commit.com): Copy the `.env.example` into `.env` file: -``` +```shell cp .env.example .env vi .env ``` @@ -336,19 +374,19 @@ Do not forget to set `DEBUG=0` and to adapt `COMPOSE_FILE` environment variable Run and build the docker containers: -``` +```shell docker compose up -d --build ``` Run the django database migrations: -``` +```shell docker compose exec app python manage.py migrate ``` Collect the static files: -``` +```shell docker compose exec app python manage.py collectstatic ``` @@ -361,7 +399,7 @@ Note you want to change the `LETSENCRYPT_EMAIL`, `LETSENCRYPT_RSA_KEY_SIZE` and On a server with a public domain, you can get a certificate issued by Let's Encrypt using certbot running the following command: -``` +```shell ./scripts/init_letsencrypt.sh ``` @@ -369,7 +407,7 @@ The certificates will be renewed automatically. To use this Let's Encrypt certificate within QFieldCloud you just need to uncomment the following lines in your `.env`: -``` +```shell QFIELDCLOUD_TLS_CERT=/etc/letsencrypt/live/${QFIELDCLOUD_HOST}/fullchain.pem QFIELDCLOUD_TLS_KEY=/etc/letsencrypt/live/${QFIELDCLOUD_HOST}/privkey.pem ``` @@ -407,14 +445,16 @@ Table of some of the relevant ports being exposed and how to configure them. Docker logs are managed by docker in the default way. 
To read the logs: - docker compose logs - +```shell +docker compose logs +``` For great `nginx` logs, use: - QFC_JQ='[.ts, .ip, (.method + " " + (.status|tostring) + " " + (.resp_time|tostring) + "s"), .uri, "I " + (.request_length|tostring) + " O " + (.resp_body_size|tostring), "C " + (.upstream_connect_time|tostring) + "s", "H " + (.upstream_header_time|tostring) + "s", "R " + (.upstream_response_time|tostring) + "s", .user_agent] | @tsv' - docker compose logs nginx -f --no-log-prefix | grep ':"nginx"' | jq -r $QFC_JQ - +```shell +QFC_JQ='[.ts, .ip, (.method + " " + (.status|tostring) + " " + (.resp_time|tostring) + "s"), .uri, "I " + (.request_length|tostring) + " O " + (.resp_body_size|tostring), "C " + (.upstream_connect_time|tostring) + "s", "H " + (.upstream_header_time|tostring) + "s", "R " + (.upstream_response_time|tostring) + "s", .user_agent] | @tsv' +docker compose logs nginx -f --no-log-prefix | grep ':"nginx"' | jq -r $QFC_JQ +``` ### Storage From 0e7d5df9fe85b6998912058abb2983bcc2bf4e13 Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Thu, 29 Jan 2026 08:50:00 +1300 Subject: [PATCH 3/3] Bump rustfs 77 to the newer alpha version 81 --- docker-compose.override.standalone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.override.standalone.yml b/docker-compose.override.standalone.yml index a055129be..6ad00ebaf 100644 --- a/docker-compose.override.standalone.yml +++ b/docker-compose.override.standalone.yml @@ -30,7 +30,7 @@ services: - ServerOptions__HostName=smtp4dev rustfs: - image: rustfs/rustfs:1.0.0-alpha.77 + image: rustfs/rustfs:1.0.0-alpha.81 restart: unless-stopped volumes: - rustfs_data:/data