Organize Docker compose.yaml examples here.
# Application services: the app waits until its db and cache dependencies
# report healthy before starting.
services:
  app:
    depends_on:
      db:
        condition: service_healthy
      cache:
        condition: service_healthy
    ports:
      - 'xxx:xxx'
    environment:
      - xxx=xxx
  db:
    image: 'xxx'
    # ...
  cache:
    image: 'xxx'
    # ...
Database Postgres Postgres DockerHub 
Run Postgres with Docker Compose
compose.yaml
services:
  postgres1:
    image: 'postgres:17-alpine'
    container_name: 'postgres1'
    ports:
      # Host port comes from the environment; container port is fixed.
      - '${POSTGRES_PORT}:5432'
    environment:
      - TZ=Asia/Shanghai
      - 'POSTGRES_PASSWORD=${POSTGRES_PASSWORD}'
    volumes:
      - 'postgres_data:/var/lib/postgresql/data'
    restart: unless-stopped
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready']
      interval: 5s
      timeout: 5s
      retries: 5

volumes:
  postgres_data:
    driver: local
Note:  The PostgreSQL image sets up trust authentication locally so you may notice a password is not required when connecting from localhost (inside the same container). However, a password will be required if connecting from a different host/container.
Verify
mkdir my-app && cd my-app
vim compose.yaml
export POSTGRES_PORT=15432 && export POSTGRES_PASSWORD=STRONG_PASSWORD
docker compose up -d
docker compose logs -f postgres1
docker compose exec -it postgres1 psql -V
docker compose exec -it postgres1 psql -U postgres
Run Postgres with docker run command
docker run -d \ 	--name postgres1 \ 	-p 5432:5432 \ 	-e POSTGRES_PASSWORD=STRONG_PASSWORD \ 	-e TZ=Asia/Shanghai \ 	-v postgres_data:/var/lib/postgresql/data \ 	--restart unless-stopped \ 	postgres:17-alpine 
MySQL MySQL DockerHub 
MySQL server DockerHub 
MySQL server docker environment variables 
Run MySQL with Docker Compose
compose.yaml
services:
  mysql1:
    image: 'mysql:8'
    container_name: 'mysql1'
    ports:
      - '${MYSQL_PORT}:3306'
    environment:
      - TZ=Asia/Shanghai
      # Allow root connections from any host, not just localhost.
      - MYSQL_ROOT_HOST=%
      - 'MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD}'
    volumes:
      - 'mysql_data:/var/lib/mysql'
    restart: on-failure
    healthcheck:
      # $$ defers variable expansion to the shell inside the container.
      test: "mysql -uroot -p$$MYSQL_ROOT_PASSWORD -e 'show databases'"
      interval: 5s
      timeout: 5s
      retries: 5

volumes:
  mysql_data:
    driver: local
image: mysql:8 or mysql/mysql-server:8.0 
 
Verify
mkdir my-app && cd my-app
vim compose.yaml
export MYSQL_PORT=13306 && export MYSQL_PASSWORD=STRONG_PASSWORD
docker compose up -d
docker compose logs -f mysql1
docker compose exec -it mysql1 mysql -V
docker compose exec -it mysql1 mysql -uroot -p
Run MySQL with docker run command
docker run --name=mysql1 -d -p 3306:3306 \   -e TZ=Asia/Shanghai -e MYSQL_ROOT_HOST='%'  -e MYSQL_ROOT_PASSWORD=STRONG_PASSWORD \   --restart on-failure \   -v mysql_data:/var/lib/mysql \   mysql/mysql-server:8.0 
Full-Text Search Engine Elasticsearch + Kibana + Logstash (ELK) v9.x Install Elasticsearch with Docker 
Install Kibana with Docker 
Running Logstash on Docker 
ELK without password and enrollment token for local development 
1. compose.yaml
services:
  elasticsearch1:
    image: 'elasticsearch:9.0.4'
    ports:
      - '${ELASTICSEARCH_PORT}:9200'
    environment:
      # Single-node cluster with security disabled — local development only.
      - discovery.type=single-node
      - xpack.security.enabled=false
    mem_limit: 1GB
    networks:
      - elastic
    healthcheck:
      test: 'curl --fail -X GET http://localhost:9200/_cat/health || exit 1'
      interval: 5s
      timeout: 5s
      retries: 10
  kibana1:
    image: 'kibana:9.0.4'
    depends_on:
      elasticsearch1:
        condition: service_healthy
    ports:
      - '${KIBANA_PORT}:5601'
    environment:
      # Reach Elasticsearch via its service name on the shared network.
      - ELASTICSEARCH_HOSTS=http://elasticsearch1:9200
    networks:
      - elastic
    healthcheck:
      test: "curl http://localhost:5601/api/status | grep 'All services and plugins are available'"
      interval: 5s
      timeout: 5s
      retries: 10
  logstash1:
    image: 'logstash:9.0.4'
    depends_on:
      elasticsearch1:
        condition: service_healthy
    networks:
      - elastic

networks:
  elastic:
    driver: bridge
xpack.security.enabled=false: Disabling security. 
2. Running ELK
docker compose down docker compose up -d 
3. Visiting Kibana Web Page http://localhost:5601/ 
ELK with enrollment token and password 
1. compose.yaml
services:
  elasticsearch1:
    image: 'elasticsearch:9.0.4'
    ports:
      - '${ELASTICSEARCH_PORT}:9200'
    environment:
      - discovery.type=single-node
      - 'ELASTIC_PASSWORD=${ELASTIC_PASSWORD}'
    mem_limit: 1GB
    networks:
      - elastic
    healthcheck:
      # Security is enabled, so the probe authenticates over HTTPS using the
      # auto-generated CA certificate inside the container.
      test: 'curl --cacert /usr/share/elasticsearch/config/certs/http_ca.crt -u elastic:${ELASTIC_PASSWORD} --fail -X GET https://localhost:9200/_cat/health || exit 1'
      interval: 5s
      timeout: 5s
      retries: 10
  kibana1:
    image: 'kibana:9.0.4'
    depends_on:
      elasticsearch1:
        condition: service_healthy
    ports:
      - '${KIBANA_PORT}:5601'
    networks:
      - elastic
    healthcheck:
      test: 'curl -f http://localhost:5601/status || exit 1'
      interval: 5s
      timeout: 5s
      retries: 10
  logstash1:
    image: 'logstash:9.0.4'
    depends_on:
      elasticsearch1:
        condition: service_healthy
    networks:
      - elastic
    volumes:
      - './docker/logstash/config:/usr/share/logstash/config/'

networks:
  elastic:
    driver: bridge
2. Running ELK
docker compose down docker compose up -d 
3. Visiting Kibana Web Page http://localhost:5601/ 
4. Get enrollment token
$ docker compose exec  -it elasticsearch1 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana 
5. Get verification code
$ docker compose exec  kibana1 bin/kibana-verification-code $ docker compose logs kibana1 
6. Login Kibana
Username: elastic 
Password: {your-password}
 
SSL 
Set up HTTPS 
Create an encrypted Elasticsearch keystore 
elasticsearch-keystore 
Elasticsearch + Kibana + Logstash (ELK) v7.x Install Elasticsearch with Docker 
Install Kibana with Docker 
Running Logstash on Docker 
ELK without password for local development 
services:
  elasticsearch1:
    image: 'elasticsearch:7.17.28'
    ports:
      - '9200:9200'
      - '9300:9300'
    environment:
      - discovery.type=single-node
      # Cap the JVM heap for local use.
      - 'ES_JAVA_OPTS=-Xms512m -Xmx512m'
    networks:
      - elastic
    volumes:
      - 'es_data:/usr/share/elasticsearch/data'
    healthcheck:
      test: 'curl --fail -X GET http://localhost:9200/_cat/health || exit 1'
      interval: 5s
      timeout: 5s
      retries: 10
  kibana1:
    image: 'kibana:7.17.28'
    depends_on:
      elasticsearch1:
        condition: service_healthy
    ports:
      - '${KIBANA_PORT}:5601'
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch1:9200
    networks:
      - elastic
    healthcheck:
      test: "curl http://localhost:5601/api/status | grep 'Looking good'"
      interval: 5s
      timeout: 5s
      retries: 10
  logstash1:
    image: 'logstash:7.17.28'
    depends_on:
      elasticsearch1:
        condition: service_healthy
    networks:
      - elastic

volumes:
  es_data:
    driver: local

networks:
  elastic:
    driver: bridge
Cache Redis Redis DockerHub 
Run Redis with Docker Compose
compose.yaml
services:
  redis1:
    image: 'redis:7-alpine'
    container_name: 'redis1'
    ports:
      - '${REDIS_PORT}:6379'
    volumes:
      - 'redis_data:/data'
    # Snapshot every 20s if at least 1 key changed; require a password.
    command: 'redis-server --save 20 1 --loglevel warning --requirepass ${REDIS_PASSWORD}'
    restart: unless-stopped
    healthcheck:
      test: ['CMD-SHELL', 'redis-cli -a ${REDIS_PASSWORD} ping | grep PONG']
      interval: 5s
      timeout: 5s
      retries: 5

volumes:
  redis_data:
    driver: local
Verify
mkdir my-app && cd my-app
vim compose.yaml
export REDIS_PORT=16379 && export REDIS_PASSWORD=STRONG_PASSWORD
docker compose up -d
docker compose logs -f redis1
docker compose exec -it redis1 redis-server -v
docker compose exec -it redis1 redis-cli
Run Redis with docker run command
docker run --name redis1 -d -p 16379:6379 \
  -v redis_data:/data \
  redis \
  redis-server \
  --save 60 1 \
  --loglevel warning \
  --requirepass STRONG_PASSWORD
Web Server Nginx Nginx DockerHub 
Run Nginx with Docker Compose
services:
  nginx1:
    image: 'nginx:stable-alpine'
    container_name: 'nginx1'
    ports:
      - '${NGINX_PORT}:${NGINX_PORT}'
    volumes:
      # Mount the host config read-only.
      - './nginx.conf:/etc/nginx/nginx.conf:ro'
    restart: unless-stopped
    healthcheck:
      test: 'curl --fail http://localhost/ || exit 1'
      interval: 5s
      timeout: 5s
      retries: 5
Verify
mkdir my-app && cd my-app
vim compose.yaml
docker run --rm --entrypoint=cat nginx:stable-alpine /etc/nginx/nginx.conf > ./nginx.conf
export NGINX_PORT=80
docker compose up -d
docker compose logs -f nginx1
docker compose exec -it nginx1 nginx -v
docker compose exec -it nginx1 nginx -t
curl http://localhost:80
HAProxy HAProxy DockerHub 
services:
  haproxy1:
    image: 'haproxy:3.2'
    volumes:
      - '/path/to/etc/haproxy:/usr/local/etc/haproxy:ro'
    sysctls:
      # Allow the unprivileged container user to bind ports below 1024.
      - net.ipv4.ip_unprivileged_port_start=0
$ docker run -d --name my-running-haproxy -v /path/to/etc/haproxy:/usr/local/etc/haproxy:ro --sysctl net.ipv4.ip_unprivileged_port_start=0 haproxy:3.2 
Message Queue RabbitMQ RabbitMQ DockerHub 
services:
  rabbitmq1:
    image: 'rabbitmq:4-management'
    container_name: 'rabbitmq1'
    ports:
      - '${RABBIT_PORT}:5672'                      # AMQP
      - '${RABBIT_MANAGEMENT_CONSOLE_PORT}:15672'  # management console
    environment:
      - TZ=Asia/Shanghai
      - 'RABBITMQ_DEFAULT_USER=${RABBIT_USER}'
      - 'RABBITMQ_DEFAULT_PASS=${RABBIT_PASSWORD}'
    restart: unless-stopped
    healthcheck:
      test: 'rabbitmq-diagnostics -q ping'
      interval: 5s
      timeout: 5s
      retries: 5
Open the RabbitMQ management console by visiting http://localhost:15672 
Run RabbitMQ with the docker run command
docker run -d --name rabbitmq1 -p 5672:5672 -p 15672:15672 rabbitmq:4-management 
RocketMQ Run RocketMQ in Docker 
Run RocketMQ with Docker Compose 
services:
  namesrv:
    image: 'apache/rocketmq:5.3.2'
    container_name: rmqnamesrv
    ports:
      - '9876:9876'
    networks:
      - rocketmq
    command: sh mqnamesrv
  broker:
    image: 'apache/rocketmq:5.3.2'
    container_name: rmqbroker
    ports:
      - '10909:10909'
      - '10911:10911'
      - '10912:10912'
    environment:
      # Broker registers itself with the name server by service name.
      - NAMESRV_ADDR=rmqnamesrv:9876
    depends_on:
      - namesrv
    networks:
      - rocketmq
    command: sh mqbroker
  proxy:
    image: 'apache/rocketmq:5.3.2'
    container_name: rmqproxy
    networks:
      - rocketmq
    depends_on:
      - broker
      - namesrv
    ports:
      - '8080:8080'
      - '8081:8081'
    restart: on-failure
    environment:
      - NAMESRV_ADDR=rmqnamesrv:9876
    command: sh mqproxy

networks:
  rocketmq:
    driver: bridge
Service Discovery Apache ZooKeeper Apache ZooKeeper DockerHub 
services:
  zookeeper1:
    image: 'zookeeper:3.9'
    restart: unless-stopped
    hostname: zoo1
    ports:
      - '${ZOOKEEPER_PORT}:2181'
Run ZooKeeper with the docker run command
$ docker run --name some-zookeeper -p 2181:2181 --restart unless-stopped -d zookeeper 
Nacos Nacos DockerHub 
services:
  nacos1:
    image: 'nacos/nacos-server:v3.0.2'
    environment:
      - MODE=standalone
      - 'NACOS_AUTH_TOKEN=${NACOS_AUTH_TOKEN}'
      - 'NACOS_AUTH_IDENTITY_KEY=${NACOS_AUTH_IDENTITY_KEY}'
      - 'NACOS_AUTH_IDENTITY_VALUE=${NACOS_AUTH_IDENTITY_VALUE}'
    ports:
      - '8080:8080'  # dashboard
      - '8848:8848'  # web API
      - '9848:9848'  # gRPC
    healthcheck:
      test: 'curl http://localhost:8080'
      interval: 5s
      timeout: 5s
      retries: 5
NACOS_AUTH_TOKEN: Nacos 用于生成JWT Token的密钥,使用长度大于32字符的字符串,再经过Base64编码。NACOS_AUTH_IDENTITY_KEY: Nacos Server端之间 Inner API的身份标识的Key,必填。NACOS_AUTH_IDENTITY_VALUE: Nacos Server端之间 Inner API的身份标识的Value,必填。ports:
8080: Nacos Dashboard’s port. 
8848: Nacos Web API’s port. 
9848: gRPC’s port. 
 
 
Run Nacos with the docker run command
docker run --name nacos-standalone-derby \     -e MODE=standalone \     -e NACOS_AUTH_TOKEN=${your_nacos_auth_secret_token}  \     -e NACOS_AUTH_IDENTITY_KEY=${your_nacos_server_identity_key}  \     -e NACOS_AUTH_IDENTITY_VALUE=${your_nacos_server_identity_value}  \     -p 8080:8080 \     -p 8848:8848 \     -p 9848:9848 \     -d nacos/nacos-server:v3.0.2 
Nacos控制台页面:http://127.0.0.1:8080/ 
首次打开会要求初始化 管理员用户nacos的密码。
 
Distributed Transaction Apache Seata Seata DockerHub 
services:
  seata-1:
    image: 'apache/seata-server:1.8.0.2'
    hostname: seata-server
    ports:
      - '8091:8091'
    environment:
      - SEATA_PORT=8091
      # Optional log store mode: 'file' (default) or 'db'.
      - STORE_MODE=file
STORE_MODE: The variable is optional, specifies the log store mode of seata-server, support db and file, default is file. 
docker run --name seata-server-1 \   -p 8091:8091 \   -p 7091:7091 \   -e SEATA_IP=127.0.0.1 \   -e SEATA_PORT=8091 \   apache/seata-server 
Vault HashiCorp Vault HashiCorp Vault DockerHub 
services:
  vault1:
    image: 'hashicorp/vault:1.20'
    ports:
      - '${VAULT_PORT}:8200'
    cap_add:
      # Lets Vault mlock memory so secrets are never swapped to disk.
      - IPC_LOCK
    environment:
      - 'VAULT_DEV_ROOT_TOKEN_ID=${VAULT_DEV_ROOT_TOKEN_ID}'
Run Vault with the docker run command
docker run -d --name=dev-vault -p 8200:8200 -e 'VAULT_DEV_ROOT_TOKEN_ID=myroot'  --cap-add=IPC_LOCK hashicorp/vault:1.20 
--cap-add=IPC_LOCK: This is required in order for Vault to lock memory, which prevents it from being swapped to disk. This is highly recommended. 
Authentication, Authorization, and User Management Keycloak Keycloak on Docker guide 
Running Keycloak in a container 
services:
  keycloak1:
    image: 'quay.io/keycloak/keycloak:26.3'
    ports:
      - '${KEYCLOAK_PORT}:8080'
    environment:
      - TZ=Asia/Shanghai
      - 'KC_BOOTSTRAP_ADMIN_USERNAME=${KC_BOOTSTRAP_ADMIN_USERNAME}'
      - 'KC_BOOTSTRAP_ADMIN_PASSWORD=${KC_BOOTSTRAP_ADMIN_PASSWORD}'
    # Development mode only; use 'start' for production.
    command: start-dev
command: 
start-dev: Start the server in development mode for local development or testing.start <OPTIONS>: Start the server in production mode. 
 
Run Keycloak with the docker run command
docker run -p 8080:8080 -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=admin quay.io/keycloak/keycloak:26.3 start-dev 
Keycloak with Postgres
click to expand! 
services:
  keycloak1:
    image: 'quay.io/keycloak/keycloak:26.3'
    depends_on:
      postgres1:
        condition: service_healthy
    ports:
      - '${KEYCLOAK_PORT}:8080'
    environment:
      - TZ=Asia/Shanghai
      - 'KC_BOOTSTRAP_ADMIN_USERNAME=${KC_BOOTSTRAP_ADMIN_USERNAME}'
      - 'KC_BOOTSTRAP_ADMIN_PASSWORD=${KC_BOOTSTRAP_ADMIN_PASSWORD}'
      - KC_DB=postgres
      # Container-to-container traffic uses the container port (5432), not
      # the host-published ${POSTGRES_PORT}.
      - 'KC_DB_URL=jdbc:postgresql://postgres1:5432/keycloak'
      - KC_DB_USERNAME=postgres
      - 'KC_DB_PASSWORD=${POSTGRES_PASSWORD}'
    networks:
      - keycloak
    command: start-dev
  postgres1:
    image: 'postgres:17-alpine'
    ports:
      - '${POSTGRES_PORT}:5432'
    environment:
      - TZ=Asia/Shanghai
      - 'POSTGRES_PASSWORD=${POSTGRES_PASSWORD}'
      # Create the keycloak database on first start.
      - POSTGRES_DB=keycloak
    volumes:
      - 'postgres_data_for_keycloak:/var/lib/postgresql/data'
    restart: unless-stopped
    networks:
      - keycloak
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready']
      interval: 5s
      timeout: 5s
      retries: 5

volumes:
  postgres_data_for_keycloak:
    driver: local

networks:
  keycloak:
    driver: bridge
 
Logto logto GitHub 
Reference compose.yaml 
services:
  logto:
    image: 'svhd/logto:latest'
    depends_on:
      postgres:
        condition: service_healthy
    # Seed the database schema before starting the server.
    entrypoint: ['sh', '-c', 'npm run cli db seed -- --swe && npm start']
    ports:
      - '3001:3001'
      - '3002:3002'
    environment:
      - TRUST_PROXY_HEADER=1
      - DB_URL=postgres://postgres:p0stgr3s@postgres:5432/logto
      # Passed through from the host environment if set.
      - ADMIN_ENDPOINT
  postgres:
    image: 'postgres:17-alpine'
    container_name: 'postgres-for-logto'
    user: postgres
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: p0stgr3s
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready']
      interval: 5s
      timeout: 5s
      retries: 5
Appendixes Using environment variables in healthcheck command. For example,
healthcheck:
  # ${MY_VARIABLE} is interpolated by Compose on the host before the
  # container runs.
  test: 'echo ${MY_VARIABLE}'
or
healthcheck:
  # $$ escapes the dollar sign, so the variable is resolved inside the
  # container at runtime.
  test: 'echo $$MY_VARIABLE'