compose.yaml erneut hinzugefügt

This commit is contained in:
2026-03-23 16:02:15 +01:00
parent c7a890f917
commit c19bb06603
23 changed files with 906 additions and 0 deletions
+47
View File
@@ -0,0 +1,47 @@
services:
  bookstack:
    image: solidnerd/bookstack:master
    container_name: bookstack
    labels:
      com.hanold.environment: production
    deploy:
      resources:
        limits:
          memory: 256M
    environment:
      - PUID=1000
      - PGID=1000
      - DB_HOST=192.168.178.170:3306
      - DB_USERNAME=bookstack
      # SECURITY NOTE(review): plaintext DB password and APP_KEY are committed
      # to VCS — move them to an env_file / secret store and rotate them.
      - DB_PASSWORD=dbpass14!
      - DB_DATABASE=bookstack
      - APP_URL=https://bookstack-app.hanold.online
      - APP_KEY=base64:5qDgV0SFRxwfP60qyGrQCVONRoDYJqNZc73+JauMVz0=
      - CACHE_DRIVER=redis
      - SESSION_DRIVER=redis
      # - QUEUE_CONNECTION=redis # optional, needs a worker
      - REDIS_SERVERS=192.168.178.183:9379:0 # Host:Port:Database
      # - AUTH_METHOD=oidc
      # - AUTH_AUTO_INITIATE=false # Set this to "true" to automatically redirect the user to authentik.
      # - OIDC_NAME=authentik # The display name shown on the login page.
      # - OIDC_DISPLAY_NAME_CLAIMS=name # Claim(s) for the user's display name; multiple attributes may be listed.
      # - OIDC_CLIENT_ID=WXK8dpYtQOxLNrCNM3XmN7Xttg5tI1T7EaLXkoFd
      # - OIDC_CLIENT_SECRET=Eoefvo9bhe1yb0EvqyhmkoHi7a8spyls25SOIZ4icQyk2ndyKt7Ny2ZAQqykof51FTzUpHhuOgLwrmMBaKmE1aHcjz93sNPdJMh4
      # - OIDC_ISSUER=https://authentik-app.hanold.online/application/o/bookstack/
      # - OIDC_ISSUER_DISCOVER=true
      # - OIDC_END_SESSION_ENDPOINT=true
    volumes:
      - ./config:/config
      - ./uploads:/var/www/bookstack/public/uploads:rw
      - ./storage-uploads:/var/www/bookstack/storage/uploads:rw
    ports:
      # Quoted to avoid YAML implicit-typing surprises on host:container mappings.
      - "9345:8080"
    restart: unless-stopped
    healthcheck:
      # BUGFIX: probe the app on its internal port via localhost instead of the
      # host IP + published port (http://192.168.178.183:9345/status). The old
      # target required hairpin routing back through the host and broke
      # whenever the host IP or the published port changed.
      test: ["CMD", "curl", "-f", "http://localhost:8080/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
networks: {}
+28
View File
@@ -0,0 +1,28 @@
services:
  dozzle:
    container_name: dozzle
    restart: unless-stopped
    labels:
      - wud.watch=true
    deploy:
      resources:
        limits:
          memory: 256M
    image: amir20/dozzle:latest
    volumes:
      # Docker socket grants log access to every local container;
      # NOTE(review): consider mounting it :ro — confirm dozzle only reads.
      - /var/run/docker.sock:/var/run/docker.sock
      - ./data:/data
    environment:
      # Remote dozzle agents aggregated into this UI (host:port, comma-separated).
      - DOZZLE_REMOTE_AGENT=192.168.178.215:9007,192.168.178.200:9007,192.168.178.241:9007
    ports:
      - 9889:8080
    healthcheck:
      # Uses the healthcheck subcommand of the dozzle binary shipped in the image.
      test:
        - CMD
        - /dozzle
        - healthcheck
      interval: 3s
      timeout: 30s
      retries: 5
      start_period: 30s
networks: {}
+34
View File
@@ -0,0 +1,34 @@
services:
  duplicati:
    image: lscr.io/linuxserver/duplicati:latest
    container_name: duplicati
    restart: unless-stopped
    labels:
      - com.hanold.environment=production
      - wud.watch=true
    deploy:
      resources:
        limits:
          memory: 2G
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Berlin
      # SECURITY NOTE(review): settings encryption key committed in plaintext —
      # move to an env_file / secret store and rotate it.
      - SETTINGS_ENCRYPTION_KEY=e232082ac53182f82487df27fe2d19dd23de0b4cb91736f824f7d5deab5e2a74
      - CLI_ARGS= #optional
      - DUPLICATI__WEBSERVICE_PASSWORD= #optional
    volumes:
      - ./config:/config
      # Backup target and backup sources mounted from the host.
      - /media/openmediavault:/backups
      - /media:/source
      # - /media/webdav:/webdav
      - /opt:/docker-opt
    ports:
      - 9200:8200
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:8200"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
networks: {}
+30
View File
@@ -0,0 +1,30 @@
# docker-compose.yml
services:
  evcc:
    deploy:
      resources:
        limits:
          memory: 256M
    labels:
      - com.hanold.environment=production
      # - wud.watch=true
    image: evcc/evcc:latest
    container_name: evcc
    ports:
      - 9070:7070
    volumes:
      # - ./evcc.yaml:/etc/evcc.yaml
      # evcc config/state directory inside the container.
      - ./data/:/root/.evcc
    restart: unless-stopped
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - http://localhost:7070
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 20s
networks: {}
+35
View File
@@ -0,0 +1,35 @@
services:
  freshrss:
    image: freshrss/freshrss:latest
    container_name: freshrss
    labels:
      - "com.centurylinklabs.watchtower.enable=true"
    deploy:
      resources:
        limits:
          memory: 256M
    environment:
      - FRESHRSS_ENV=production
      - PUID=1000
      - PGID=1000
      # BUGFIX: was TZ='Europe/Berlin' — in a "- KEY=value" list item the
      # single quotes are part of the plain scalar, so the container received
      # the literal value 'Europe/Berlin' (quotes included), which is not a
      # valid tz database name.
      - TZ=Europe/Berlin
      # Feed refresh minutes (four refreshes per hour).
      - CRON_MIN=2,17,32,47
    volumes:
      - ./data:/var/www/FreshRSS/data
      - ./extensions:/var/www/FreshRSS/extensions
      # - ./config:/config
      # - ./extensions:/extensions
      # - ./data/:/data
    ports:
      - "9280:80"
    restart: unless-stopped
    healthcheck:
      # Health script shipped inside the official FreshRSS image.
      test:
        - CMD
        - cli/health.php
      timeout: 10s
      start_period: 60s
      # start_interval: 11s
      interval: 75s
      retries: 3
networks: {}
+41
View File
@@ -0,0 +1,41 @@
services:
  server:
    image: gitea/gitea
    restart: unless-stopped
    container_name: gitea
    labels:
      - com.hanold.environment=production
      - wud.watch=true
    deploy:
      resources:
        limits:
          # BUGFIX: unquoted 4 is parsed as a YAML integer; the Compose spec
          # defines cpus as a (string) decimal and `docker stack deploy` /
          # older compose releases reject the integer form.
          cpus: "4"
          memory: 512M
    volumes:
      - ./data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      # Quoted to avoid YAML implicit-typing surprises on host:container mappings.
      - "9876:3000"
      - "2222:2222"
    environment:
      - GITEA__database__DB_TYPE=mysql
      - GITEA__database__HOST=192.168.178.170:3306
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      # SECURITY NOTE(review): plaintext DB password committed — move to an
      # env_file / secret store and rotate.
      - GITEA__database__PASSWD=dbpass
      - USER_UID=1000
      - USER_GID=1000
    healthcheck:
      test:
        - CMD
        - wget
        - --no-verbose
        - --tries=1
        - --spider
        - http://127.0.0.1:3000/api/healthz
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 40s
networks: {}
+32
View File
@@ -0,0 +1,32 @@
services:
  languagetool:
    image: erikvl87/languagetool
    container_name: languagetool
    restart: unless-stopped
    labels:
      - com.hanold.environment=production
      - wud.watch=true
    deploy:
      resources:
        limits:
          memory: 4G
    ports:
      - 0.0.0.0:9888:8010
    environment:
      # NOTE(review): the unusual `Java_Xms`/`Java_Xmx` casing looks like this
      # image's own convention — confirm against the image README before
      # "fixing" it to standard JAVA_* names.
      - Java_Xms=512m
      - Java_Xmx=1g
      # Optional: n-gram models for better detection (see below)
      - langtool_languageModel=/ngrams
    volumes:
      - ./ngrams:/ngrams # only needed when n-gram models are used
    healthcheck:
      test:
        - CMD
        - curl
        - -f
        - http://localhost:8010/v2/languages
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s # LanguageTool needs a while to start
networks: {}
+28
View File
@@ -0,0 +1,28 @@
services:
  eclipse-mosquitto:
    labels:
      - com.hanold.environment=production
      - wud.watch=true
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 64M
    stdin_open: true
    tty: true
    ports:
      - 9883:1883
    volumes:
      # Mosquitto's conventional config/data/log layout.
      - ./config:/mosquitto/config
      - ./data:/mosquitto/data
      - ./log:/mosquitto/log
    image: eclipse-mosquitto:latest
    container_name: mqtt
    healthcheck:
      # Plain TCP probe on the broker port; NOTE(review): assumes `nc` exists
      # in the image — confirm (busybox nc in the alpine-based image).
      test:
        - CMD-SHELL
        - nc -z localhost 1883
      interval: 20s
      timeout: 5s
      retries: 3
networks: {}
+123
View File
@@ -0,0 +1,123 @@
services:
  n8n:
    image: n8nio/n8n:latest
    restart: unless-stopped
    container_name: n8nv2
    deploy:
      resources:
        limits:
          memory: 2G
    ports:
      - "9789:5678"
    volumes:
      - ./n8n-data:/home/node/.n8n
      # - /media/Daten/:/mnt/heimserver-daten
      - ./upload/:/mnt/upload
      # - ./files/WertPlattensammlung.csv:/mnt/files
    environment:
      - N8N_RUNNERS_MODE=external
      - N8N_RUNNERS_ENABLED=true
      - N8N_RUNNERS_BROKER_LISTEN_ADDRESS=0.0.0.0
      - N8N_RUNNERS_BROKER_PORT=5679
      # SECURITY NOTE(review): runner token, basic-auth and DB credentials are
      # committed in plaintext — move to an env_file / secret store and rotate.
      - N8N_RUNNERS_AUTH_TOKEN=^0hXW4sgSU7aG6!LFkGZ
      - N8N_NATIVE_PYTHON_RUNNER=true
      - N8N_PYTHON_ENABLED=true
      - OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=true
      - NODES_EXCLUDE=[]
      - N8N_RESTRICT_FILE_ACCESS_TO=/home/node/.n8n-files;/mnt/upload
      - N8N_DIAGNOSTICS_ENABLED=false
      - N8N_VERSION_NOTIFICATIONS_ENABLED=true
      - N8N_BASIC_AUTH_ACTIVE=true
      - N8N_BASIC_AUTH_USER=admin
      # BUGFIX: the surrounding double quotes were part of the plain scalar, so
      # the password n8n received literally began and ended with '"'.
      # NOTE(review): if logins currently work with the quoted value, keep the
      # stored credential in sync when applying this.
      - N8N_BASIC_AUTH_PASSWORD=OvTCSDZqpA#NfitX^dP1
      - GENERIC_TIMEZONE=Europe/Berlin
      - N8N_SECURE_COOKIE=false
      - N8N_HOST=n8n-app.hanold.online
      - N8N_PROTOCOL=https
      # - N8N_PORT=" " # CAUTION: setting this kills external access to n8n
      - WEBHOOK_URL=https://n8n-app.hanold.online
      - N8N_PROXY_HOPS=1
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_HOST=postgres # service name instead of IP
      - DB_POSTGRESDB_PORT=5432
      - DB_POSTGRESDB_DATABASE=n8n
      - DB_POSTGRESDB_USER=n8n
      - DB_POSTGRESDB_PASSWORD=dbpass
      # Timeout tuning:
      - EXECUTIONS_TIMEOUT=36000
      - N8N_HTTP_REQUEST_TIMEOUT=0
      - N8N_HTTP_HEADERS_TIMEOUT=36000000
      - N8N_HTTP_KEEPALIVE_TIMEOUT=36000000
      - FETCH_HEADERS_TIMEOUT=36000000
      - FETCH_BODY_TIMEOUT=360000000
      - FETCH_CONNECT_TIMEOUT=6000000
      - N8N_LOG_LEVEL=info
      - N8N_LOG_OUTPUT=console
    healthcheck:
      # BUGFIX: the shell command was split across two plain-scalar lines in
      # the original; folded back into a single command line.
      test:
        - CMD-SHELL
        - wget -q -T 5 --no-verbose --tries=1 -O - http://localhost:5678/healthz | grep -q '{"status":"ok"}' || exit 1
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    networks:
      - n8n-net
  postgres:
    image: postgres:15-alpine
    restart: unless-stopped
    container_name: n8nv2-postgres
    labels:
      - wud.watch=true
      - wud.tag.include=15-.*$
    deploy:
      resources:
        limits:
          memory: 128M
    environment:
      - POSTGRES_DB=n8n
      - POSTGRES_USER=n8n
      - POSTGRES_PASSWORD=dbpass
    ports:
      # NOTE(review): this publishes Postgres to the whole LAN; only n8n on
      # the internal n8n-net needs it — consider dropping the mapping.
      - "5432:5432"
    volumes:
      - ./postgres-data:/var/lib/postgresql/data
    networks:
      - n8n-net
    healthcheck:
      # $$ escapes a literal $ for compose interpolation; the variables are
      # expanded inside the container's shell.
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB} -q"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 20s
  task-runners:
    image: n8nio/runners:latest
    restart: unless-stopped
    container_name: n8n-runners
    deploy:
      resources:
        limits:
          memory: 128M
    environment:
      - N8N_RUNNERS_TASK_BROKER_URI=http://n8n:5679
      - N8N_RUNNERS_AUTH_TOKEN=^0hXW4sgSU7aG6!LFkGZ
      - N8N_RUNNERS_MAX_CONCURRENCY=10
      - N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT=15
    depends_on:
      - n8n
    networks:
      - n8n-net
    healthcheck:
      test:
        - CMD
        - sh
        - -c
        - wget -q -T 3 -O - http://localhost:5680/healthz >/dev/null 2>&1
      interval: 10s
      timeout: 5s
      retries: 6
networks:
  n8n-net:
    driver: bridge
+32
View File
@@ -0,0 +1,32 @@
services:
  navidrome:
    deploy:
      resources:
        limits:
          memory: 512M
    image: deluan/navidrome:latest
    container_name: navidrome
    restart: unless-stopped
    labels:
      - com.hanold.environment=production
    environment:
      - ND_CONFIGFILE=/config/navidrome.toml
    ports:
      - 9533:4533
    volumes:
      # Music library mounted read-only; app data and config live beside the
      # compose file.
      - /media/Musik:/music:ro
      - ./data:/data
      - ./navidrome.toml:/config/navidrome.toml
    healthcheck:
      test:
        - CMD
        - wget
        - --no-verbose
        - --tries=1
        - --spider
        - http://localhost:4533/ping
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
networks: {}
+72
View File
@@ -0,0 +1,72 @@
services:
  nextcloud:
    container_name: nextcloud
    restart: unless-stopped
    labels:
      com.hanold.environment: production
    # BUGFIX: was "https://nextcloud-app.hanold.online" — a container hostname
    # must be a bare DNS name; the scheme and slashes are invalid there. The
    # public URL belongs in OVERWRITEHOST/OVERWRITEPROTOCOL or config.php.
    hostname: nextcloud-app.hanold.online
    volumes:
      - ./nextcloud-data:/var/www/html
      # - /media/webdav/Dokumente:/mnt/WebDav_Dokumente
      - /media/Daten/Nextcloud:/mnt/Heimserver
    tmpfs:
      - /tmp:size=1G # Ensures /tmp writable, clears restarts
    init: true
    ports:
      - "9180:80"
    environment:
      - MYSQL_HOST=192.168.178.170
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
      # SECURITY NOTE(review): plaintext DB and admin passwords committed —
      # move to an env_file / secret store and rotate.
      - MYSQL_PASSWORD=dbpass
      - NEXTCLOUD_ADMIN_USER=admin
      - NEXTCLOUD_ADMIN_PASSWORD=kNxMAUj1L391QuAWxBkJ
      - REDIS_HOST=192.168.178.183
      - REDIS_HOST_PORT=9379
      - MEMCACHE_HOST=192.168.178.183
      - MEMCACHE_PORT=9379
      # - PUID=1000
      # - PGID=1000
    image: nextcloud:latest
    deploy:
      resources:
        limits:
          memory: 4G
    healthcheck:
      # Healthy only when occ reports installed, not in maintenance, and no
      # pending DB upgrade (all three greps must match, hence wc -l == 3).
      test:
        - CMD-SHELL
        - "runuser -u www-data -- php /var/www/html/occ status | grep -e 'installed: true' -e 'maintenance: false' -e 'needsDbUpgrade: false' | wc -l | grep -q 3 || exit 1"
      interval: 60s
      timeout: 10s
      retries: 5
      start_period: 20s
  collabora:
    image: collabora/code:latest
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 4G
    cap_add:
      - MKNOD
      - SYS_ADMIN
    environment:
      # Backslash-escaped dots: collabora treats `domain` as a regex.
      - domain=nextcloud-app\.hanold\.online
      # - username=admin # Or blank
      # - password=yourpass
      - extra_params=--o:ssl.enable=false # If internal; true + certs for prod
    ports:
      - "9380:9980"
    healthcheck:
      # BUGFIX: probe on the internal port via localhost instead of the host
      # IP + published port (http://192.168.178.183:9380), which required
      # hairpin routing and broke when the host IP changed.
      test:
        - CMD
        - curl
        - -f
        - http://localhost:9980
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
networks: {}
+39
View File
@@ -0,0 +1,39 @@
services:
  broker:
    # Local Redis broker. NOTE(review): PAPERLESS_REDIS below points at the
    # external Redis (192.168.178.183:9379), so this service appears unused —
    # confirm before removing it or switching PAPERLESS_REDIS to it.
    image: docker.io/library/redis:latest
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 128M
    labels:
      - wud.watch=true
  webserver:
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 4G
    labels:
      - wud.watch=true
    depends_on:
      - broker
    ports:
      - 9800:8000
    volumes:
      - /media/Paperless/paperless-data:/usr/src/paperless/data
      - /media/Paperless/paperless-media:/usr/src/paperless/media
      - ./export:/usr/src/paperless/export
      - ./paperless-consume:/usr/src/paperless/consume
    env_file: docker-compose.env
    environment:
      PAPERLESS_REDIS: redis://192.168.178.183:9379
      PAPERLESS_DBENGINE: mariadb
      PAPERLESS_DBHOST: 192.168.178.170
      PAPERLESS_DBUSER: paperless # only needed if non-default username
      # SECURITY NOTE(review): plaintext DB password committed — prefer the
      # env_file above or a secret store.
      PAPERLESS_DBPASS: dbpass # only needed if non-default password
      PAPERLESS_DBPORT: 3306
      USERMAP_UID: 1002
      USERMAP_GID: 1001
networks: {}
+62
View File
@@ -0,0 +1,62 @@
services:
  database:
    image: postgres:17-alpine
    container_name: patchmon-postgres
    labels:
      - wud.watch=true
    # restart: unless-stopped
    environment:
      POSTGRES_DB: patchmon_db
      POSTGRES_USER: patchmon_user
      # SECURITY NOTE(review): plaintext credentials and JWT secret committed —
      # move to an env_file / secret store and rotate.
      POSTGRES_PASSWORD: dbpass
    volumes:
      - ./postgres:/var/lib/postgresql/data
    healthcheck:
      test:
        - CMD-SHELL
        - pg_isready -U patchmon_user -d patchmon_db
      interval: 5s
      timeout: 5s
      retries: 12
  backend:
    image: ghcr.io/patchmon/patchmon-backend:latest
    container_name: patchmon-backend
    # restart: unless-stopped
    labels:
      - wud.watch=true
    user: 1000:1000
    depends_on:
      # Wait for postgres to pass its healthcheck before starting.
      database:
        condition: service_healthy
    environment:
      LOG_LEVEL: info
      DATABASE_URL: postgresql://patchmon_user:dbpass@database:5432/patchmon_db
      JWT_SECRET: very-long-random-jwt-secret
      # Backend listens on 3001 inside the container
      SERVER_PORT: 3001
      # IMPORTANT: set this to what your browser origin actually is
      # Examples:
      #   http://10.1.149.134:3000
      #   http://patchmon.local:3000
      # If PatchMon supports comma-separated origins, this allows both local + LAN access.
      CORS_ORIGIN: http://localhost:3000,http://192.168.178.183:3000,http://192.168.178.138:3001
      REDIS_HOST: 192.168.178.183
      REDIS_PORT: 9379
    volumes:
      - ./data:/app/agents
    ports:
      - 3001:3001
  frontend:
    image: ghcr.io/patchmon/patchmon-frontend:latest
    labels:
      - wud.watch=true
    container_name: patchmon-frontend
    # restart: unless-stopped
    depends_on:
      - backend
    environment:
      BACKEND_HOST: backend
      BACKEND_PORT: 3001
    ports:
      - 3000:3000
networks: {}
+20
View File
@@ -0,0 +1,20 @@
services:
  portainer:
    image: portainer/portainer-ce:latest
    container_name: portainer
    ports:
      - "9999:9000"
    volumes:
      # NOTE(review): /data is bind-mounted to ./data, so the named volume
      # `portainer_data` declared below is never referenced — confirm which
      # storage location is intended.
      - ./data:/data
      - /var/run/docker.sock:/var/run/docker.sock
    restart: unless-stopped
    networks:
      - portainer_net
networks:
  portainer_net:
    driver: bridge
volumes:
  portainer_data:
+43
View File
@@ -0,0 +1,43 @@
services:
  prometheus:
    container_name: prometheus
    deploy:
      resources:
        limits:
          memory: 1G
    labels:
      - wud.watch=true
    ports:
      - 9090:9090
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - /timeseries/prometheus/:/prometheus
    command:
      - --config.file=/etc/prometheus/prometheus.yml
      - --storage.tsdb.path=/prometheus
      # ~19.7 years of retention; size=0 disables the size-based limit.
      - --storage.tsdb.retention.time=7200d
      - --storage.tsdb.retention.size=0
      - --storage.tsdb.wal-compression
      - --log.level=error # log errors only
    image: prom/prometheus
    restart: unless-stopped
    healthcheck:
      # Prometheus readiness endpoint.
      test:
        - CMD-SHELL
        - wget --spider --quiet http://localhost:9090/-/ready || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
  pve-exporter:
    image: prompve/prometheus-pve-exporter:latest
    container_name: prometheus-pve-exporter
    deploy:
      resources:
        limits:
          memory: 128M
    ports:
      - 9221:9221
    volumes:
      # Proxmox VE API credentials for the exporter.
      - /opt/prometheus/pve.yml:/etc/prometheus/pve.yml:ro
    restart: unless-stopped
networks: {}
+31
View File
@@ -0,0 +1,31 @@
services:
  pulse:
    deploy:
      resources:
        limits:
          memory: 2G
    image: rcourtman/pulse:latest
    container_name: pulse
    labels:
      - wud.watch=true
    ports:
      - 9655:7655
    volumes:
      - ./data:/data
    environment:
      - REDIS_HOST=192.168.178.183
      - REDIS_PORT=9379
      # Network discovery (usually not needed - auto-scans common networks)
      - DISCOVERY_SUBNET=192.168.178.0/24 # Only for non-standard networks
      # Security (all optional - runs open by default)
      # SECURITY NOTE(review): plaintext password and API token committed —
      # use the bcrypt-hash form and a secret store, then rotate.
      - PULSE_AUTH_USER=sven # Username for web UI login
      - PULSE_AUTH_PASS=3IQyC3*202*a634DK7x^ # Plain text or bcrypt hash (auto-hashed if plain)
      - API_TOKEN=tMeE9lQShpMQbrmBOL44K5E8Bf66LjUs
      - ALLOW_UNPROTECTED_EXPORT=true # Allow export without auth (default: false)
      - PULSE_DISABLE_DOCKER_UPDATE_ACTIONS=true
      - PULSE_PUBLIC_URL=https://pulse-app.hanold.online
      - LOG_LEVEL=error # Log level: debug/info/warn/error (default: info)
    restart: unless-stopped
volumes:
  # NOTE(review): declared but unused — service data is bind-mounted to ./data.
  pulse_data: null
networks: {}
+22
View File
@@ -0,0 +1,22 @@
services:
  redis:
    image: redis:latest
    container_name: redis
    labels:
      - wud.watch=true
    restart: unless-stopped
    # command: redis-server --loglevel error
    deploy:
      resources:
        limits:
          memory: 256M
    ports:
      # NOTE(review): published to the LAN with no authentication; several
      # stacks in this commit point at it — consider requirepass or firewalling.
      - 9379:6379
    volumes:
      - ./data:/data
    healthcheck:
      test: ["CMD-SHELL", "redis-cli ping || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
networks: {}
+24
View File
@@ -0,0 +1,24 @@
services:
  uptime-kuma:
    image: louislam/uptime-kuma
    container_name: uptime-kuma
    labels:
      - wud.watch=true
      - com.hanold.environment=production
    deploy:
      resources:
        limits:
          memory: 1G
    volumes:
      - ./data:/app/data
    ports:
      - 9011:3001
    restart: unless-stopped
    environment:
      UPTIME_KUMA_DB_TYPE: mariadb
      UPTIME_KUMA_DB_HOSTNAME: 192.168.178.170
      UPTIME_KUMA_DB_PORT: 3306
      UPTIME_KUMA_DB_USERNAME: uptimekuma
      # SECURITY NOTE(review): plaintext DB password committed — move to an
      # env_file / secret store.
      UPTIME_KUMA_DB_PASSWORD: dbpass
      UPTIME_KUMA_DB_NAME: uptimekuma
networks: {}
+33
View File
@@ -0,0 +1,33 @@
services:
  vaultwarden:
    container_name: vaultwarden
    image: vaultwarden/server:latest
    deploy:
      resources:
        limits:
          memory: 512M
    labels:
      - wud.watch=true
      - com.hanold.environment=production
    restart: unless-stopped
    volumes:
      - ./data/:/data/
    ports:
      - 9445:80
    environment:
      # SECURITY NOTE(review): admin token and SMTP password committed in
      # plaintext — vaultwarden supports an Argon2 hash for ADMIN_TOKEN;
      # rotate both and load them from an env_file / secret store.
      - ADMIN_TOKEN=jfdkljslkfj8949mklfjs!
      - WEBSOCKET_ENABLED=true
      - SIGNUPS_ALLOWED=false
      - EMERGENCY_ACCESS_ALLOWED=true
      - DOMAIN=https://vaultwarden-app.hanold.online
      # Mail
      - SMTP_HOST=smtp.strato.de
      - SMTP_FROM=vaultwarden@hanold.online
      - SMTP_FROM_NAME=Vaultwarden
      - SMTP_SECURITY=starttls # one of "starttls" / "force_tls" / "off"
      - SMTP_PORT=587
      - SMTP_USERNAME=vaultwarden@hanold.online
      - SMTP_PASSWORD=VaultW_app_PW0815#
      - SMTP_AUTH_MECHANISM=Login # one of "Plain" / "Login" / "Xoauth2"
      - REDIS_URL=redis://192.168.178.183:9379
networks: {}
+29
View File
@@ -0,0 +1,29 @@
services:
  wordpress:
    image: wordpress:latest
    container_name: wordpress
    labels:
      - wud.watch=true
    deploy:
      resources:
        limits:
          memory: 1G
    volumes:
      - ./data:/var/www/html
    ports:
      - 9080:80
    restart: always
    environment:
      - WORDPRESS_DB_HOST=192.168.178.170
      - WORDPRESS_DB_USER=wordpress
      # SECURITY NOTE(review): plaintext DB password committed — move to an
      # env_file / secret store.
      - WORDPRESS_DB_PASSWORD=dbpass
      - WORDPRESS_DB_NAME=wordpress
      - WORDPRESS_REDIS_HOST=192.168.178.183
      - WORDPRESS_REDIS_PORT=9379
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 40s # WP setup takes time
networks: {}
+29
View File
@@ -0,0 +1,29 @@
services:
  wordpress:
    image: wordpress:latest
    container_name: wordpress_bookstax
    labels:
      - wud.watch=true
    deploy:
      resources:
        limits:
          memory: 256M
    volumes:
      - ./data:/var/www/html
    ports:
      - 9100:80
    restart: unless-stopped
    environment:
      - WORDPRESS_DB_HOST=192.168.178.170
      - WORDPRESS_DB_USER=wp_bookstax
      # SECURITY NOTE(review): plaintext DB password committed — move to an
      # env_file / secret store.
      - WORDPRESS_DB_PASSWORD=DBpass14!
      - WORDPRESS_DB_NAME=wp_bookstax
      - WORDPRESS_REDIS_HOST=192.168.178.183
      - WORDPRESS_REDIS_PORT=9379
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 40s # WP setup takes time
networks: {}
+25
View File
@@ -0,0 +1,25 @@
services:
  wordpress:
    image: wordpress:latest
    container_name: wordpress_charlotte
    labels:
      - wud.watch=true
    deploy:
      resources:
        limits:
          memory: 1G
    volumes:
      - ./data:/var/www/html
    ports:
      - "9181:80"
    restart: always
    environment:
      - WORDPRESS_DB_HOST=192.168.178.170
      - WORDPRESS_DB_USER=wp_charlotte
      # SECURITY NOTE(review): plaintext DB password committed — move to an
      # env_file / secret store.
      - WORDPRESS_DB_PASSWORD=dbpass
      - WORDPRESS_DB_NAME=wp_charlotte
      - WORDPRESS_REDIS_HOST=192.168.178.183
      - WORDPRESS_REDIS_PORT=9379
    # CONSISTENCY: the other two WordPress stacks in this commit define this
    # healthcheck; added here so monitoring treats all three sites alike.
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 40s # WP setup takes time
volumes:
  # NOTE(review): declared but unused — site data is bind-mounted to ./data.
  wp_data: null
networks: {}
+47
View File
@@ -0,0 +1,47 @@
services:
  whatsupdocker:
    deploy:
      resources:
        limits:
          memory: 512M
    image: getwud/wud:latest
    labels:
      - wud.watch=true
    container_name: wud
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./data:/store
    ports:
      - "9700:3000"
    environment:
      - TZ=Europe/Berlin
      # SECURITY NOTE(review): a GitHub personal access token (ghp_…) is
      # committed in plaintext — revoke/rotate it and load it from an
      # env_file or secret store.
      - WUD_REGISTRY_LSCR_PRIVATE_USERNAME=Zvenster
      - WUD_REGISTRY_LSCR_PRIVATE_TOKEN=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
      - WUD_LOG_LEVEL=info
      - WUD_AUTH_BASIC_SVEN_USER=sven
      # $$ escapes a literal $ for compose variable interpolation.
      - WUD_AUTH_BASIC_SVEN_HASH=$$apr1$$ir3cptob$$eBkIR.L31wK4DlmGYRrEh.
      # Disable Prometheus metrics
      - WUD_PROMETHEUS_ENABLED=false
      # Watchers:
      - WUD_WATCHER_CHATBOT_HOST=192.168.178.215
      - WUD_WATCHER_CHATBOT_PORT=2375
      - WUD_WATCHER_CHATBOT_WATCHALL=true
      - WUD_WATCHER_CHATBOT_WATCHATSTART=true
      - WUD_WATCHER_LOCAL_WATCHATSTART=true
      - WUD_WATCHER_LOCAL_WATCHBYDEFAULT=false
      - WUD_WATCHER_LOCAL_CRON=15 19 * * * # daily at 19:15 (was 0 12 * * *)
      - WUD_WATCHER_CHATBOT_CRON=15 19 * * * # daily at 19:15 (was 0 12 * * *)
      # Triggers
      - WUD_TRIGGER_HTTP_LOCAL_AUTO=true
      - WUD_TRIGGER_HTTP_LOCAL_ONCE=true
      - WUD_TRIGGER_HTTP_LOCAL_URL=http://192.168.178.183:9789/webhook/87467257-a2f7-4cda-abac-d6eb98dd4e29
      - WUD_TRIGGER_HTTP_LOCAL_SIMPLEBODY=$${container.name}
      - WUD_TRIGGER_HTTP_LOCAL_METHOD=POST
    healthcheck:
      # BUGFIX: probe the app on its internal port via localhost instead of the
      # host IP + published port (http://192.168.178.183:9700) — the old target
      # needed hairpin routing back through the host and broke whenever the
      # host IP changed.
      test: curl --fail http://localhost:3000/health || exit 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 10s
networks: {}