В данном примере рассматривается вариант развёртывания всех компонентов системы, включая
общесистемные PostgreSQL, Redis, RabbitMQ, на одной виртуальной машине.
Примечание
PostgreSQL рекомендуется развёртывать на отдельной виртуальной машине и использовать SSD-диски.
Общая схема проекта в примере:
Рекомендуемая структура проекта:
/opt/timetta
/proxy
/nginx.conf => конфигурация proxy
/ssl
/cert.pem => сертификат сервера (публичная часть)
/key.pem => закрытый ключ
/certificate.pfx => сертификат для OAuth (подпись токенов)
/client-settings.json => настройки клиентской части
/compose.yml => конфигурация Docker Compose
/key.json => авторизованный ключ для доступа к реестру образов Timetta
/nginx.conf => конфигурация клиентской части
/settings.json => настройки сервисов
/.env => параметры проекта
Создайте структуру следующим образом:
sudo mkdir -p /opt/timetta
sudo chown -R $USER:$USER /opt/timetta # Даём права своему пользователю
cd /opt/timetta
.env
# Image tags of the Timetta backend/frontend images pulled in compose.yml.
# For production, pin an explicit version instead of "latest" (see note below).
BACKEND_IMAGE_TAG=latest
FRONTEND_IMAGE_TAG=latest
Для продуктивной среды указывайте конкретную версию!
Используется для запуска хоста клиентского приложения.
nginx.conf:
user nginx;
worker_processes 1;

events {}

http {
    include /etc/nginx/mime.types;

    server {
        # "http2" removed from the plain-text listeners: without TLS,
        # "listen 80 http2" serves h2c (cleartext HTTP/2), which regular
        # browsers do not speak — the site would fail to load over HTTP.
        listen [::]:80;
        listen 80;

        root /usr/share/nginx/html;
        index index.html;

        # SPA fallback: any unknown path is handled by index.html.
        # The trailing "=404" was dropped — with it, /index.html was only
        # an existence check and not the internal-redirect fallback.
        location / {
            try_files $uri $uri/ /index.html;
        }
    }
}
Используется для запуска обратного прокси (reverse proxy) перед сервисами.
nginx.conf:
events {
    worker_connections 1024;
}

http {
    # Allow large uploads.
    client_max_body_size 100M;

    # Buffering and timeouts.
    client_body_buffer_size 128k;
    client_body_timeout 300s;

    # Proxy timeouts.
    proxy_read_timeout 300s;
    proxy_connect_timeout 300s;
    proxy_send_timeout 300s;

    # Map for WebSocket upgrade handling.
    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }

    # TLSv1.3 added alongside TLSv1.2.
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_certificate /etc/nginx/ssl/cert.pem;
    ssl_certificate_key /etc/nginx/ssl/key.pem;

    # Common proxy headers. NOTE: nginx does NOT inherit http-level
    # proxy_set_header into a location that declares its own
    # proxy_set_header, so the WebSocket locations below repeat them.
    proxy_set_header Authorization $http_authorization;
    proxy_pass_request_headers on;
    proxy_set_header Host $host:$server_port;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $host:$server_port;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Forwarded-Port $server_port;

    # Client application.
    server {
        listen 443 ssl;
        server_name _;
        location / {
            proxy_pass http://client;
        }
    }

    # REST API.
    server {
        listen 5400 ssl;
        server_name _;
        location / {
            proxy_pass http://api;
        }
    }

    # Passport (OAuth).
    server {
        listen 5401 ssl;
        server_name _;
        location / {
            proxy_pass http://passport;
        }
    }

    # WebSocket hub (requires Upgrade/Connection headers).
    server {
        listen 5404 ssl;
        server_name _;
        location / {
            proxy_pass http://web-socket;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
            # Re-declared: http-level headers are not inherited here
            # because this location sets its own proxy_set_header.
            proxy_set_header Authorization $http_authorization;
            proxy_set_header Host $host:$server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host $host:$server_port;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
        }
    }

    # Reporting.
    server {
        listen 5405 ssl;
        server_name _;
        location / {
            proxy_pass http://reporting;
        }
    }

    # Scheduler (also uses WebSocket upgrades).
    server {
        listen 5406 ssl;
        server_name _;
        location / {
            proxy_pass http://scheduler;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
            # Re-declared for the same inheritance reason as above.
            proxy_set_header Authorization $http_authorization;
            proxy_set_header Host $host:$server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host $host:$server_port;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
        }
    }
}
Подставьте ваше доменное имя или IP-адрес в директивы server_name (в примере используется заглушка «_»). Укажите пароли для Redis, RabbitMQ и прочие параметры.
compose.yml:
name: timetta

# Shared settings merged into every application service via "<<: *app-config".
# Merge-key semantics are shallow: a key declared directly on a service
# overrides the merged-in value.
x-common-logging: &app-config
  # Added here so ALL app services restart automatically; previously only
  # "api" declared restart explicitly and the rest had no restart policy.
  restart: always
  logging:
    driver: "json-file"
    options:
      max-size: "10m"
      max-file: "3"
  depends_on:
    postgres:
      condition: service_healthy
    redis:
      condition: service_healthy
    rabbitmq:
      condition: service_healthy
  environment:
    - ASPNETCORE_URLS=http://*:80
    - ASPNETCORE_ENVIRONMENT=Production
# Project files mounted into containers as Docker Compose configs.
configs:
  # Backend service settings (mounted at /app/secrets/settings.json).
  app_settings:
    file: ./settings.json
  # Client-side runtime settings (served to the browser).
  client_settings:
    file: ./client-settings.json
  # nginx config of the client static host.
  nginx_config:
    file: ./nginx.conf
  # Token-signing certificate for the passport (OAuth) service.
  certificate_pfx:
    file: ./certificate.pfx
  # Reverse-proxy nginx config and its TLS key pair.
  proxy_config:
    file: ./proxy/nginx.conf
  proxy_cert:
    file: ./proxy/ssl/cert.pem
  proxy_key:
    file: ./proxy/ssl/key.pem
# Data storage.
volumes:
  # Named volume for PostgreSQL data, managed by Docker on the host.
  pgdata:
    driver: local
services:
  # System-wide PostgreSQL instance.
  postgres:
    image: postgres:17
    container_name: postgres
    environment:
      POSTGRES_USER: dba
      # Quoted so YAML keeps it a string; replace this example password
      # for any production deployment.
      POSTGRES_PASSWORD: "1"
      PGDATA: /var/lib/postgresql/data/pgdata
    ports:
      - "5430:5432"
    volumes:
      # Use the named volume declared at top level. The original mounted
      # ./pgdata as a bind mount while the declared "pgdata" volume went
      # unused.
      - pgdata:/var/lib/postgresql/data/pgdata
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: 4G
        reservations:
          cpus: "0.5"
          memory: 1024M
    # ">-" strips the trailing newline (">+" kept it needlessly).
    command: >-
      postgres -c max_connections=1000
    healthcheck:
      # Probe with the actual superuser "dba". The original probed
      # postgres_user/postgres_db, which do not exist in this setup and
      # cause failed-connection noise in the server log every interval.
      test: ["CMD-SHELL", "pg_isready -U dba"]
      interval: 10s
      timeout: 10s
      retries: 10
    restart: always
    tty: true
    stdin_open: true
redis:
image: redis:8.2.1
container_name: redis
restart: always
volumes:
- /path/to/local/dаta:/root/redis
- /path/to/local/redis.conf:/usr/local/etc/redis/redis.conf
environment:
- REDIS_PASSWORD=1
- REDIS_PORT=6379
- REDIS_DATABASES=16
healthcheck:
test:
[
"CMD",
"redis-cli",
"-a",
"$$REDIS_PASSWORD",
"--no-auth-warning",
"ping",
]
interval: 10s
timeout: 3s
retries: 3
start_period: 10s
rabbitmq:
container_name: rabbitmq
image: rabbitmq:4.1.4
restart: always
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=1
healthcheck:
test: rabbitmq-diagnostics -q ping
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
  # REST API service.
  api:
    container_name: api
    restart: always
    image: cr.yandex/crpr8bvek949tq2fuqkf/api:${BACKEND_IMAGE_TAG}
    # Inherits logging, depends_on and environment from the shared anchor;
    # keys declared here override merged-in values.
    <<: *app-config
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: 4G
        reservations:
          cpus: "0.5"
          memory: 2G
    configs:
      - source: app_settings
        target: /app/secrets/settings.json
  # OAuth service; additionally mounts the token-signing certificate.
  passport:
    container_name: passport
    image: cr.yandex/crpr8bvek949tq2fuqkf/passport:${BACKEND_IMAGE_TAG}
    <<: *app-config
    configs:
      - source: app_settings
        target: /app/secrets/settings.json
      - source: certificate_pfx
        target: /app/secrets/certificate.pfx
  # Message consumer service.
  consumer:
    container_name: consumer
    image: cr.yandex/crpr8bvek949tq2fuqkf/consumer:${BACKEND_IMAGE_TAG}
    <<: *app-config
    configs:
      - source: app_settings
        target: /app/secrets/settings.json
  # Scheduler service (proxied on port 5406 with WebSocket upgrades).
  scheduler:
    container_name: scheduler
    image: cr.yandex/crpr8bvek949tq2fuqkf/scheduler:${BACKEND_IMAGE_TAG}
    <<: *app-config
    configs:
      - source: app_settings
        target: /app/secrets/settings.json
  # WebSocket hub (proxied on port 5404).
  web-socket:
    container_name: web-socket
    image: cr.yandex/crpr8bvek949tq2fuqkf/web-socket-hub:${BACKEND_IMAGE_TAG}
    <<: *app-config
    configs:
      - source: app_settings
        target: /app/secrets/settings.json
  # Reporting service (proxied on port 5405).
  reporting:
    container_name: reporting
    image: cr.yandex/crpr8bvek949tq2fuqkf/reporting:${BACKEND_IMAGE_TAG}
    <<: *app-config
    configs:
      - source: app_settings
        target: /app/secrets/settings.json
  # Static host of the client application (nginx serving the built frontend).
  client:
    container_name: client
    image: cr.yandex/crpr8bvek949tq2fuqkf/client-host:${FRONTEND_IMAGE_TAG}
    deploy:
      update_config:
        parallelism: 1
        delay: 0s
        order: stop-first
    configs:
      - source: nginx_config
        target: /etc/nginx/nginx.conf
      - source: client_settings
        target: /usr/share/nginx/html/assets/config.json
nginx-proxy:
container_name: nginx-proxy
image: nginx
ports:
- "5400:5400"
- "5401:5401"
- "5404:5404"
- "5405:5405"
- "5406:5406"
- "443:443"
- "80:80"
configs:
- source: proxy_config
target: /etc/nginx/nginx.conf
- source: proxy_cert
target: /etc/nginx/ssl/cert.pem
- source: proxy_key
target: /etc/nginx/ssl/key.pem
depends_on:
- api
- consumer
- passport
- web-socket
- scheduler
- client
docker compose up -d --force-recreate
Перейти на русскую версию?