added all the dotfiles

This commit is contained in:
liph
2025-12-19 22:53:46 +00:00
parent e29ca99666
commit 303cd3b5a9
61 changed files with 3148 additions and 0 deletions

64
podman/ai/docker-compose.yml Executable file
View File

@@ -0,0 +1,64 @@
version: "3.8"
services:
# Ollama (Local LLM Runner)
ollama:
image: docker.io/ollama/ollama:latest
container_name: ollama
environment:
- PUID=1000
- PGID=1000
volumes:
- /mnt/flash1/podman/ai/config/ollama/:/root/.ollama # Model storage
ports:
- "11434:11434" # API port
restart: unless-stopped
networks:
- ai_net
# OpenWebUI (Chat Interface for Ollama)
openwebui:
image: ghcr.io/open-webui/open-webui:main
container_name: openwebui
depends_on:
- ollama
environment:
- OLLAMA_API_BASE_URL=http://ollama:11434 # Connect to Ollama
volumes:
- /mnt/flash1/podman/ai/config/openwebui:/app/backend/data
ports:
- "3010:8080" # Web UI
restart: unless-stopped
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
- ai_net
n8n:
image: docker.io/n8nio/n8n:latest
container_name: n8n
environment:
- N8N_RUNNERS_ENABLED=true
- WEBHOOK_URL=https://n8n.liphlink.xyz/
- N8N_HOST=n8n.liphlink.xyz
# - WEBHOOK_TUNNEL_URL=https://n8n.liphlink.xyz
- N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=true
- GENERIC_TIMEZONE=Europe/Berlin
- N8N_SECURE_COOKIE=false
- N8N_PROTOCOL=https
- PUID=1000
- PGID=1000
ports:
- "5678:5678"
volumes:
- /mnt/flash1/podman/ai/config/n8n_data:/home/node/.n8n:z
restart: unless-stopped
networks:
ai_net:
volumes:
ollama_data:
openwebui_data:
n8n_data:

91
podman/base/.env Executable file
View File

@@ -0,0 +1,91 @@
# Baserow Environment Configuration
# Copy this file to .env and customize the values
# ============================================
# REQUIRED: SECURITY SETTINGS
# ============================================
# IMPORTANT: Change these before starting Baserow!
# Database password
DATABASE_PASSWORD=1ChageposT
# Redis password
REDIS_PASSWORD=1ChagerediS
# Secret key (MUST be at least 50 characters)
# Generate with: openssl rand -base64 64
SECRET_KEY=mWDO0stOYZxSzHDtqCpOIi78pJBoeIIs+tH2rcNPOf8=
# ============================================
# BASEROW PUBLIC URL
# ============================================
# The URL where Baserow will be accessible
# Examples:
# - Local: http://localhost
# - Local network: http://192.168.1.100
# - Domain: https://baserow.yourdomain.com
BASEROW_PUBLIC_URL=https://baserow.liphlink.xyz
BASEROW_ENABLE_SECURE_PROXY_SSL_HEADER=True
# ============================================
# EMAIL CONFIGURATION (Optional)
# ============================================
## Enable SMTP for user invitations and notifications
#EMAIL_SMTP=False
#
## If EMAIL_SMTP=True, configure these:
#EMAIL_SMTP_HOST=smtp.gmail.com
#EMAIL_SMTP_PORT=587
#EMAIL_SMTP_USER=your-email@gmail.com
#EMAIL_SMTP_PASSWORD=your-app-password
#EMAIL_SMTP_USE_TLS=True
#FROM_EMAIL=noreply@yourdomain.com
# ============================================
# S3/MINIO STORAGE (Optional)
# ============================================
# Uncomment and configure if using MinIO or AWS S3 for file storage
# For MinIO (if you uncomment the MinIO service in docker-compose.yml):
# AWS_ACCESS_KEY_ID=minioadmin
# AWS_SECRET_ACCESS_KEY=changeme_minio_password
# AWS_STORAGE_BUCKET_NAME=baserow
# AWS_S3_ENDPOINT_URL=http://minio:9000
# For AWS S3:
# AWS_ACCESS_KEY_ID=your_aws_access_key
# AWS_SECRET_ACCESS_KEY=your_aws_secret_key
# AWS_STORAGE_BUCKET_NAME=your-bucket-name
# AWS_S3_REGION_NAME=us-east-1
# ============================================
## MINIO CONFIGURATION (if using MinIO)
## ============================================
#MINIO_ROOT_USER=minioadmin
#MINIO_ROOT_PASSWORD=changeme_secure_minio_password
# ============================================
# PERFORMANCE TUNING (Optional)
# ============================================
# Number of worker processes (adjust based on CPU cores)
BASEROW_AMOUNT_OF_WORKERS=1
# Backend debug mode (set to False in production)
BASEROW_BACKEND_DEBUG=False
# Enable if behind a reverse proxy with SSL
# NOTE(review): this key is already set to True near the top of this file;
# a second assignment wins in dotenv last-key-wins semantics and was silently
# overriding it to False, breaking HTTPS detection behind the proxy.
#BASEROW_ENABLE_SECURE_PROXY_SSL_HEADER=False
# ============================================
# WEBHOOK CONFIGURATION (Optional)
# ============================================
BASEROW_WEBHOOKS_MAX_CONSECUTIVE_TRIGGER_FAILURES=8
BASEROW_WEBHOOKS_MAX_RETRIES_PER_CALL=8
BASEROW_WEBHOOKS_REQUEST_TIMEOUT_SECONDS=5
# ============================================
# FEATURE FLAGS (Optional)
# ============================================
# Sync templates after migration
BASEROW_TRIGGER_SYNC_TEMPLATES_AFTER_MIGRATION=True

149
podman/base/docker-compose.yml Executable file
View File

@@ -0,0 +1,149 @@
version: '3.8'
services:
# PostgreSQL Database
db:
image: docker.io/postgres:15-alpine
container_name: baserow-postgres
restart: unless-stopped
environment:
POSTGRES_DB: baserow
POSTGRES_USER: baserow
POSTGRES_PASSWORD: ${DATABASE_PASSWORD:-changeme_secure_postgres_password}
volumes:
- /mnt/flash1/podman/base/data/postgres:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U baserow"]
interval: 10s
timeout: 5s
retries: 5
networks:
- baserow-network
# Redis Cache
redis:
image: docker.io/redis:7-alpine
container_name: baserow-redis
restart: unless-stopped
command: redis-server --requirepass ${REDIS_PASSWORD:-changeme_secure_redis_password}
volumes:
- /mnt/flash1/podman/base/data/redis:/data
healthcheck:
test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "${REDIS_PASSWORD:-changeme_secure_redis_password}", "ping"]
interval: 10s
timeout: 5s
retries: 5
networks:
- baserow-network
# Baserow Application (All-in-One)
baserow:
image: docker.io/baserow/baserow:latest
container_name: baserow
restart: unless-stopped
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
ports:
- "8079:80" # HTTP
environment:
# Database Configuration
DATABASE_HOST: db
DATABASE_NAME: baserow
DATABASE_USER: baserow
DATABASE_PASSWORD: ${DATABASE_PASSWORD:-changeme_secure_postgres_password}
DATABASE_PORT: 5432
# Redis Configuration
REDIS_HOST: redis
REDIS_PASSWORD: ${REDIS_PASSWORD:-changeme_secure_redis_password}
REDIS_PORT: 6379
REDIS_PROTOCOL: redis
# Baserow Configuration
BASEROW_PUBLIC_URL: ${BASEROW_PUBLIC_URL:-http://localhost}
SECRET_KEY: ${SECRET_KEY:-changeme_secret_key_minimum_50_characters_long}
# # Email Configuration (optional - for invitations and notifications)
# EMAIL_SMTP: ${EMAIL_SMTP:-False}
# EMAIL_SMTP_HOST: ${EMAIL_SMTP_HOST:-}
# EMAIL_SMTP_PORT: ${EMAIL_SMTP_PORT:-587}
# EMAIL_SMTP_USER: ${EMAIL_SMTP_USER:-}
# EMAIL_SMTP_PASSWORD: ${EMAIL_SMTP_PASSWORD:-}
# EMAIL_SMTP_USE_TLS: ${EMAIL_SMTP_USE_TLS:-True}
# FROM_EMAIL: ${FROM_EMAIL:-noreply@baserow.io}
# File Upload Configuration
# Use S3-compatible storage (optional - MinIO, AWS S3, etc.)
# AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-}
# AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-}
# AWS_STORAGE_BUCKET_NAME: ${AWS_STORAGE_BUCKET_NAME:-baserow}
# AWS_S3_REGION_NAME: ${AWS_S3_REGION_NAME:-us-east-1}
# AWS_S3_ENDPOINT_URL: ${AWS_S3_ENDPOINT_URL:-} # For MinIO: http://minio:9000
# Advanced Configuration
BASEROW_AMOUNT_OF_WORKERS: ${BASEROW_AMOUNT_OF_WORKERS:-1}
BASEROW_CELERY_BEAT_DEBUG_LEVEL: ${BASEROW_CELERY_BEAT_DEBUG_LEVEL:-INFO}
BASEROW_BACKEND_DEBUG: ${BASEROW_BACKEND_DEBUG:-False}
# Feature Flags
BASEROW_ENABLE_SECURE_PROXY_SSL_HEADER: ${BASEROW_ENABLE_SECURE_PROXY_SSL_HEADER:-False}
BASEROW_TRIGGER_SYNC_TEMPLATES_AFTER_MIGRATION: ${BASEROW_TRIGGER_SYNC_TEMPLATES_AFTER_MIGRATION:-True}
# Webhooks and API
BASEROW_WEBHOOKS_MAX_CONSECUTIVE_TRIGGER_FAILURES: ${BASEROW_WEBHOOKS_MAX_CONSECUTIVE_TRIGGER_FAILURES:-8}
BASEROW_WEBHOOKS_MAX_RETRIES_PER_CALL: ${BASEROW_WEBHOOKS_MAX_RETRIES_PER_CALL:-8}
BASEROW_WEBHOOKS_REQUEST_TIMEOUT_SECONDS: ${BASEROW_WEBHOOKS_REQUEST_TIMEOUT_SECONDS:-5}
# Performance Tuning
BASEROW_MAX_ROW_REPORT_ERROR_COUNT: ${BASEROW_MAX_ROW_REPORT_ERROR_COUNT:-30}
BASEROW_INITIAL_TABLE_DATA_LIMIT: ${BASEROW_INITIAL_TABLE_DATA_LIMIT:-}
volumes:
- /mnt/flash1/podman/base/data/baserow:/baserow/data
networks:
- baserow-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/api/health/"]
interval: 30s
timeout: 10s
retries: 3
# MinIO S3-compatible storage (optional but recommended for production)
# Uncomment this section if you want to use MinIO for file storage
# minio:
# image: minio/minio:latest
# container_name: baserow-minio
# restart: unless-stopped
# command: server /data --console-address ":9001"
# environment:
# MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
# MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-changeme_minio_password}
# volumes:
# - minio_data:/data
# ports:
# - "9000:9000" # S3 API
# - "9001:9001" # MinIO Console
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
# interval: 30s
# timeout: 20s
# retries: 3
# networks:
# - baserow-network
volumes:
postgres_data:
driver: local
redis_data:
driver: local
baserow_data:
driver: local
# minio_data:
# driver: local
networks:
baserow-network:
driver: bridge

118
podman/browser/docker-compose.yml Executable file
View File

@@ -0,0 +1,118 @@
version: "3.8"
services:
# --- PhotoPrism (Photo Management) ---
photoprism:
image: docker.io/photoprism/photoprism:latest
container_name: photoprism
environment:
- PHOTOPRISM_UPLOAD_NSFW=true
- PHOTOPRISM_ADMIN_PASSWORD=1ChagephotO # Change this!
volumes:
- /mnt/flash1/podman/browser/config/photoprism:/photoprism/storage
- /mnt/tank/photos/:/photoprism/originals # Photo storage
ports:
- "2342:2342" # Web UI
restart: unless-stopped
audiobookshelf:
image: ghcr.io/advplyr/audiobookshelf:latest
container_name: audiobookshelf
ports:
- 13378:80
volumes:
- /mnt/tank/audio/audiobooks:/audiobooks
- /mnt/tank/audio/podcasts:/podcasts
- /mnt/flash1/podman/browser/config/audiobookshelf:/config
- /mnt/flash1/podman/browser/data/audiobookshelf:/metadata
environment:
- TZ=Europe/Berlin
# Music Server (Navidrome)
# navidrome:
# image: docker.io/deluan/navidrome:latest
# container_name: navidrome
# restart: unless-stopped
# ports:
# - "4533:4533" # Web UI port
# volumes:
# - /mnt/flash1/podman/browser/config/navidrome/data/:/data # Config & DB
# - /mnt/tank/music/:/music:ro # Music library (read-only)
# - /mnt/tank/audio/:/tutorials:ro
# environment:
# - ND_SCANSCHEDULE=1h # Auto-scan every hour
# - ND_LOGLEVEL=info
navidrome-music:
image: docker.io/deluan/navidrome:latest
container_name: navidrome-music
ports:
- "4535:4533"
environment:
- ND_SCANSCHEDULE=1h
- ND_LOGLEVEL=info
- ND_SESSIONTIMEOUT=24h
volumes:
- /mnt/flash1/podman/browser/data/navidrome-music/:/data:Z
- /mnt/tank/music:/music:Z
restart: unless-stopped
navidrome-meditation:
image: docker.io/deluan/navidrome:latest
container_name: navidrome-meditation
ports:
- "4536:4533"
environment:
- ND_SCANSCHEDULE=1h
- ND_LOGLEVEL=info
- ND_SESSIONTIMEOUT=24h
volumes:
- /mnt/flash1/podman/browser/data/navidrome-meditation:/data:Z
- /mnt/tank/audio/healing:/music:Z
restart: unless-stopped
calibre-web:
image: ghcr.io/linuxserver/calibre-web
container_name: calibre-web
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
# - DOCKER_MODS=linuxserver/calibre-web:calibre
volumes:
- /mnt/flash1/podman/browser/config/calibre:/config
- /mnt/tank/ebooks_phil:/books
ports:
- 8083:8083
restart: unless-stopped
mealie:
image: ghcr.io/mealie-recipes/mealie:nightly
container_name: mealie
restart: always
ports:
- "9925:9000" #
deploy:
resources:
limits:
memory: 1000M #
volumes:
- /mnt/flash1/podman/browser/config/mealie/:/app/data/
environment:
# Set Backend ENV Variables Here
ALLOW_SIGNUP: "false"
PUID: 1000
PGID: 1000
TZ: Europe/Berlin
BASE_URL: https://demo.mealie.io/g/liph # NOTE(review): this points at the public Mealie demo — should likely be this deployment's own URL (e.g. https://mealie.liphlink.xyz)
kavita:
image: lscr.io/linuxserver/kavita:latest
container_name: kavita
volumes:
- /mnt/flash1/podman/browser/config/kavita/:/kavita/config # Config
- /mnt/tank/ebooks_phil:/books:z # Library
ports:
- "5000:5000"
restart: unless-stopped

4
podman/essential/.env Executable file
View File

@@ -0,0 +1,4 @@
PUID=1000
PGID=33
TZ=Europe/Berlin

View File

@@ -0,0 +1,86 @@
version: "3.8"
services:
# Homarr (Dashboard)
homarr:
image: ghcr.io/ajnart/homarr:latest
container_name: homarr
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
- DISABLE_UPGRADE_MODAL=true
volumes:
- /mnt/flash1/podman/essential/config/homarr:/app/data/configs
ports:
- "7575:7575"
restart: unless-stopped
# Portainer (Container Management - Fixed Socket Path)
portainer:
image: docker.io/portainer/portainer-ce:latest
container_name: portainer
restart: unless-stopped
ports:
# - "9443:9443"
- "9000:9000"
volumes:
- /mnt/flash1/podman/essential/config/portainer-data:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
security_opt:
- no-new-privileges:true
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
watchtower:
image: docker.io/containrrr/watchtower:latest
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
fail2ban:
image: lscr.io/linuxserver/fail2ban:latest
container_name: fail2ban
cap_add:
- NET_ADMIN
- NET_RAW
network_mode: host
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
- VERBOSITY=-vv #optional
volumes:
- /mnt/flash1/podman/essential/config/fail2ban:/config
- /var/log:/var/log:ro
- /mnt/tank/podman/homeassistant/home_assistant/config/home-assistant.log:/remotelogs/homeassistant:ro #optional
# - /path/to/nextcloud/log:/remotelogs/nextcloud:ro #optional
# - /path/to/nginx/log:/remotelogs/nginx:ro #optional
# - /path/to/overseerr/log:/remotelogs/overseerr:ro #optional
# - /path/to/prowlarr/log:/remotelogs/prowlarr:ro #optional
# - /path/to/radarr/log:/remotelogs/radarr:ro #optional
# - /path/to/sonarr/log:/remotelogs/sonarr:ro #optional
# - /path/to/vaultwarden/log:/remotelogs/vaultwarden:ro #optional
restart: unless-stopped
vaultwarden:
image: docker.io/vaultwarden/server:latest
container_name: vaultwarden
restart: unless-stopped
ports:
- "9445:80" # Expose port 80 internally (no need to publish)
volumes:
- /mnt/flash1/podman/essential/config/vw_data:/data
environment:
- WEBSOCKET_ENABLED=true # Enable WebSocket for real-time sync
- ADMIN_TOKEN=a4dJaEqGjx1q76PoAG0FOw9AURubpMht5cZSVyAvGrX2hnyhlUBc/WbImuZedhTQ
#
#networks:
# npm_network:
# external: true # Use NPM's existing network

View File

@@ -0,0 +1,54 @@
version: '3'
services:
homeassistant:
image: ghcr.io/home-assistant/home-assistant:stable
container_name: homeassistant
restart: unless-stopped
volumes:
- /mnt/flash1/podman/homeassistant/home_assistant/config/:/config
- /etc/localtime:/etc/localtime:ro
environment:
- TZ=${TZ} # Change to your timezone
network_mode: host
# devices:
# - /dev/ttyACM0:/dev/ttyACM0 # Only if using USB devices (e.g., Z-Wave sticks)
# - /dev/ttyUSB0:/dev/ttyUSB0:Z
privileged: true # Required for some hardware access
zigbee2mqtt:
image: docker.io/koenkk/zigbee2mqtt:latest
server: mqtt://liph:1ChagemqtTd@mosquitto:1883 # "mosquitto" = container name
container_name: zigbee2mqtt
restart: unless-stopped
volumes:
- /mnt/flash1/podman/homeassistant/zigbee2mqtt/data/:/app/data
- /dev/ttyUSB0:/dev/ttyUSB0:z
# devices:
# - /dev/serial/by-id/usb-Itead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_V2_7a09c139e72bef11884a5a88dfbc56eb-if00-port0:/dev/ttyACM0
environment:
- TZ=${TZ}
network_mode: host # Required for discovery
mosquitto:
image: docker.io/eclipse-mosquitto:latest
container_name: mosquitto
restart: unless-stopped
volumes:
- /mnt/flash1/podman/homeassistant/mosquitto/config:/mosquitto/config
- /mnt/flash1/podman/homeassistant/mosquitto/data:/mosquitto/data
- /mnt/flash1/podman/homeassistant/mosquitto/log:/mosquitto/log
ports:
- "1883:1883" # MQTT
- "9001:9001" # Websocket (for Lovelace)
nodered:
image: docker.io/nodered/node-red:latest
container_name: nodered
restart: unless-stopped
volumes:
- /mnt/flash1/podman/homeassistant/nodered/data/:/data
ports:
- "1880:1880"
environment:
- TZ=${TZ}

22
podman/immich/.env Executable file
View File

@@ -0,0 +1,22 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/mnt/flash1/podman/immich/config/immich/
# The location where your database files are stored. Network shares are not supported for the database
DB_DATA_LOCATION=/mnt/flash1/podman/immich/config/postgres/
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=1ChageposT
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich

View File

@@ -0,0 +1,75 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- /etc/localtime:/etc/localtime:ro
- /mnt/tank/photos:/photos:z
env_file:
- .env
ports:
- '2283:2283'
# depends_on:
# - redis
# - database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:facc1d2c3462975c34e10fccb167bfa92b0e0dbd992fc282c29a61c3243afb11
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:32324a2f41df5de9efe1af166b7008c3f55646f8d0e00d9550c16c9822366b4a
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
volumes:
model-cache:

3
podman/media/.env Executable file
View File

@@ -0,0 +1,3 @@
PUID=1000
PGID=33
TZ=Europe/Berlin

60
podman/media/docker-compose.yml Executable file
View File

@@ -0,0 +1,60 @@
services:
jellyfin:
image: lscr.io/linuxserver/jellyfin:latest
container_name: jellyfin
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
# - JELLYFIN_PublishedServerUrl=http://10.0.0.101 #optional
volumes:
- /mnt/flash1/podman/media/config/:/config
- /mnt/tank/:/data
ports:
- 8096:8096
- 7359:7359/udp #Service Discovery
- 1901:1900/udp #Client Discovery
restart: unless-stopped
jellyseerr:
container_name: jellyseerr
image: docker.io/fallenbagel/jellyseerr:latest
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
volumes:
- /mnt/flash1/podman/media/config/jellyseerr:/app/config
ports:
- 5055:5055
restart: unless-stopped
jellystat-db:
image: docker.io/postgres:latest
container_name: jellystat-db
restart: unless-stopped
environment:
POSTGRES_USER: liph
POSTGRES_PASSWORD: 1ChageposT
volumes:
- ./jellystat/postgres:/var/lib/postgresql/data
jellystat:
image: docker.io/cyfershepard/jellystat:latest
container_name: jellystat
restart: unless-stopped
environment:
POSTGRES_USER: liph
POSTGRES_PASSWORD: 1ChageposT
POSTGRES_IP: jellystat-db
POSTGRES_PORT: 5432
JWT_SECRET: "/N1k7UPV2K5I47aARWEbD6lFtD8igZeImIpt64Fhf/Q="
TZ: ${TZ}
volumes:
- ./jellystat/backup-data:/app/backend/backup-data
ports:
- "3002:3000"
depends_on:
- jellystat-db
# Want more!? https://github.com/awesome-jellyfin/awesome-jellyfin

View File

@@ -0,0 +1,223 @@
#volumes:
# netdataconfig:
# netdatalib:
# netdatacache:
version: "3"
services:
beszel:
image: docker.io/henrygd/beszel:latest
container_name: beszel
restart: unless-stopped
ports:
- 8090:8090
volumes:
- /mnt/flash1/podman/monitoring/beszel/data:/beszel_data
- ./beszel_socket:/beszel_socket
beszel-agent:
image: docker.io/henrygd/beszel-agent
container_name: beszel-agent
restart: unless-stopped
network_mode: host
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /mnt/flash1/podman/monitoring/beszel_agent/data:/var/lib/beszel-agent
# monitor other disks / partitions by mounting a folder in /extra-filesystems
# - /mnt/disk/.beszel:/extra-filesystems/sda1:ro
environment:
LISTEN: 45876
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAUf2dTtQj9WJesH11kFD7/qzMIubcHzuZwN92XnHpfB'
TOKEN: 1ac0ad26-9c32-4c45-b02b-c4aa2f8eb40c
HUB_URL: https://beszel.liphlink.xyz
dozzle:
image: docker.io/amir20/dozzle:latest
container_name: dozzle
volumes:
- /run/podman/podman.sock:/var/run/docker.sock:ro
ports:
- "8078:8080"
environment:
- DOZZLE_LEVEL=info
- DOZZLE_TAILSIZE=300
restart: unless-stopped
pulse:
image: docker.io/rcourtman/pulse:latest
container_name: pulse
restart: unless-stopped
ports:
- "${PULSE_PORT:-7655}:7655"
volumes:
- pulse-data:/data
# Secure temperature monitoring via host-side proxy (requires setup - see docs)
# Uncomment after installing pulse-sensor-proxy on host with --standalone flag
# Mount is read-only (:ro) for security - proxy uses SO_PEERCRED for access control
# - /run/pulse-sensor-proxy:/run/pulse-sensor-proxy:ro
environment:
- TZ=${TZ:-UTC}
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:7655/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
netdata:
container_name: netdata
image: docker.io/netdata/netdata:latest
ports:
- '19999:19999'
volumes:
- /mnt/flash1/podman/monitoring/netdata/data:/etc/netdata
- /mnt/flash1/podman/monitoring/netdata/lib:/var/lib/netdata
- /mnt/flash1/podman/monitoring/netdata/cache:/var/cache/netdata
- '/etc/passwd:/host/etc/passwd:ro'
- '/etc/group:/host/etc/group:ro'
- '/proc:/host/proc:ro'
- '/sys:/host/sys:ro'
- '/etc/os-release:/host/etc/os-release:ro'
- '/etc/localtime:/etc/localtime:ro'
- '/var/log:/host/var/log:ro'
- '/var/run/docker.sock:/var/run/docker.sock:ro'
- '/run/dbus:/run/dbus:ro'
restart: unless-stopped
cap_add:
- SYS_PTRACE
- SYS_ADMIN
security_opt:
- apparmor=unconfined
#networks: ["nginx_nginx_network"] #optional
uptime-kuma:
image: docker.io/louislam/uptime-kuma:latest
container_name: uptime-kuma
volumes:
- ./data/uptime-kuma:/app/data
ports:
- "3001:3001" # <Host Port>:<Container Port>
restart: always
volumes:
uptime-kuma:
netdataconfig:
netdatalib:
netdatacache:
pulse-data:
#
# loki:
# container_name: loki
# image: docker.io/grafana/loki:main
# networks:
# - grafana-monitoring
# volumes:
# - ./data/loki/:/etc/loki
# ports:
# - "3100:3100"
# restart: unless-stopped
# command: -config.file=./config/loki/loki-config.yml
#
# promtail:
# container_name: promtail
# image: docker.io/grafana/promtail:main
# networks:
# - grafana-monitoring
# volumes:
# - /var/log:/var/log
# - ./data/promtail/:/etc/promtail
# ports:
# - "1514:1514" # this is only needed if you are going to send syslogs
# restart: unless-stopped
# command: -config.file=./config/promtail/promtail-config.yml
#
# grafana:
# container_name: grafana
# image: docker.io/grafana/grafana-oss:main-ubuntu
# user: "0"
# networks:
# - grafana-monitoring
# - proxy
# volumes:
# - ./data/grafana/:/var/lib/grafana
# restart: unless-stopped
# ports:
# - 3000:3000
# labels:
# - "traefik.enable=true"
# - "traefik.http.routers.grafana.entrypoints=http"
# - "traefik.http.routers.grafana.rule=Host(`grafana.jimsgarage.co.uk`)"
# - "traefik.http.routers.grafana.middlewares=default-whitelist@file"
# - "traefik.http.middlewares.grafana-https-redirect.redirectscheme.scheme=https"
# - "traefik.http.routers.grafana.middlewares=grafana-https-redirect"
# - "traefik.http.routers.grafana-secure.entrypoints=https"
# - "traefik.http.routers.grafana-secure.rule=Host(`grafana.jimsgarage.co.uk`)"
# - "traefik.http.routers.grafana-secure.tls=true"
# - "traefik.http.routers.grafana-secure.service=grafana"
# - "traefik.http.services.grafana.loadbalancer.server.port=3000"
# - "traefik.docker.network=proxy"
#
# influxdb:
# container_name: influxdb
# image: docker.io/influxdb:latest
# restart: unless-stopped
# ports:
# - 8086:8086
# - 8089:8089/udp
# networks:
# - grafana-monitoring
# volumes:
# - ./data/influxdb/:/var/lib/influxdb2
#
# telegraf:
# container_name: docker.io/telegraf
# restart: unless-stopped
# user: 1000:1000 #you need to find the GID of Docker if not added to Sudo group changed from 995 to 1000
# networks:
# - grafana-monitoring
# volumes:
# - /run/user/1000/podman/podman.sock:/run/podman/podman.sock
# - './config/telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro'
# - './data/telegraf/mibs/:/usr/share/snmp/mibs'
# environment:
# - HOST_ETC=/hostfs/etc
# - HOST_PROC=/hostfs/proc
# - HOST_SYS=/hostfs/sys
# - HOST_VAR=/hostfs/var
# - HOST_RUN=/hostfs/run
# - HOST_MOUNT_PREFIX=/hostfs
# image: docker.io/telegraf
#
# graphite:
# image: docker.io/graphiteapp/graphite-statsd
# container_name: graphite
# restart: unless-stopped
# ports:
# - 8050:80 # nginx
# - 2003-2004 # carbon receiver - plaintext & pickle
# - 2023-2024 # carbon aggregator - plaintext & pickle
# - 8125:8125/udp # statsd
# - 8126:8126 # statsd admin
# volumes:
# - ./config/graphite/configs/:/opt/graphite/conf
# - ./data/graphite/data/:/opt/graphite/storage
# - ./config/graphite/statsd_config/:/opt/statsd/config
# networks:
# - grafana-monitoring
#
# prometheus:
# image: docker.io/prom/prometheus
# container_name: prometheus
# restart: unless-stopped
# ports:
# - 9090:9090
# volumes:
# - ./config/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
# networks:
# - grafana-monitoring
#
#networks:
# grafana-monitoring:
# proxy:
# # external: true

View File

@@ -0,0 +1,43 @@
services:
app:
image: 'docker.io/jc21/nginx-proxy-manager:latest'
container_name: npm
restart: unless-stopped
ports:
# These ports are in format <host-port>:<container-port>
- '80:80' # Public HTTP Port
- '443:443' # Public HTTPS Port
- '81:81' # Admin Web Port
# Add any other Stream port you want to expose
# - '21:21' # FTP
#environment:
# Uncomment this if you want to change the location of
# the SQLite DB file within the container
# DB_SQLITE_FILE: "/data/database.sqlite"
# Uncomment this if IPv6 is not enabled on your host
# DISABLE_IPV6: 'true'
volumes:
- /mnt/flash1/podman/network/data:/data
- /mnt/flash1/podman/network/letsencrypt:/etc/letsencrypt
syncthing:
image: lscr.io/linuxserver/syncthing:latest
container_name: syncthing
hostname: syncthing
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Zurich # Change to your timezone
volumes:
- /mnt/flash1/podman/network/config:/config
- /mnt/flash1/podman/nextcloud/config/obsidian/Phil:/data/obsidian:z
ports:
- 8384:8384
- 22000:22000/tcp
- 22000:22000/udp
- 21027:21027/udp
restart: unless-stopped

8
podman/nextcloud/.env Executable file
View File

@@ -0,0 +1,8 @@
DB_PASSWORD=1ChageposT
DB_USERNAME=next-db
DB_DATABASE_NAME=next
DB_HOST=next-db
PUID=33
PGID=1000

48
podman/nextcloud/config.php Executable file
View File

@@ -0,0 +1,48 @@
<?php
$CONFIG = array (
'htaccess.RewriteBase' => '/',
'memcache.local' => '\\OC\\Memcache\\APCu',
'apps_paths' =>
array (
0 =>
array (
'path' => '/var/www/html/apps',
'url' => '/apps',
'writable' => false,
),
1 =>
array (
'path' => '/var/www/html/custom_apps',
'url' => '/custom_apps',
'writable' => true,
),
),
'upgrade.disable-web' => true,
'passwordsalt' => '6lps3G6LfFcAhOlPzVU+e1vi9pmLZa',
'secret' => '/sp6y5Wdq1x085Oow0nrkHcFtaehnL88g5cPH31gvrpWfV8S',
'trusted_domains' =>
array (
0 => 'localhost',
1 => 'next.liphlink.xyz',
),
'datadirectory' => '/var/www/html/data',
'dbtype' => 'pgsql',
'version' => '31.0.8.1',
'overwrite.cli.url' => 'http://localhost',
'dbname' => 'next',
'dbhost' => 'next-db',
'dbport' => '',
'dbtableprefix' => 'oc_',
'dbuser' => 'next-db',
'dbpassword' => '1ChageposT',
'installed' => true,
'instanceid' => 'ocyyoad5slj1',
'app_install_overwrite' =>
array (
0 => 'diary',
1 => 'inventory',
2 => 'files_reader',
),
'loglevel' => 2,
'maintenance' => false,
);

View File

@@ -0,0 +1,157 @@
version: '3.8'
services:
# Database (PostgreSQL)
next-db:
image: docker.io/postgres:18
container_name: next-db
environment:
- POSTGRES_DB=${DB_DATABASE_NAME}
- POSTGRES_USER=${DB_USERNAME}
- POSTGRES_PASSWORD=${DB_PASSWORD}
volumes:
- /mnt/flash1/podman/nextcloud/config/db:/var/lib/postgresql
restart: unless-stopped
networks:
- nextcloud_network
# # Redis Cache
# next_redis:
# image: docker.io/redis:latest
# container_name: next-redis
# command: redis-server --save 60 1 --loglevel warning
# volumes:
# - ./data/redis:/data
# restart: unless-stopped
# networks:
# - nextcloud_network
# Nextcloud Main Application
next:
image: docker.io/nextcloud:latest
container_name: next
depends_on:
- next-db
ports:
- "8808:80"
environment:
- POSTGRES_DB=${DB_DATABASE_NAME}
- POSTGRES_USER=${DB_USERNAME}
- POSTGRES_PASSWORD=${DB_PASSWORD}
- POSTGRES_HOST=${DB_HOST}
- NEXTCLOUD_TRUSTED_DOMAINS=next.liphlink.xyz
- NEXTCLOUD_ADMIN_USER=liph
- NEXTCLOUD_ADMIN_PASSWORD=1ChagenexT
volumes:
- /mnt/flash1/podman/nextcloud/config/nextcloud/data:/var/www/html
- /mnt/flash1/podman/nextcloud/config/nextcloud/config:/var/www/html/config:Z
- /mnt/flash1/podman/nextcloud/config/nextcloud/apps:/var/www/html/custom_apps
- /mnt/tank/ebooks_phil:/ebooks_phil:z
- /mnt/tank/cloud_phil:/cloud_phil:z
- /mnt/tank/ebooks_miri:/ebooks_miri:z
- /mnt/tank/cloud_miri:/cloud_miri:z
restart: unless-stopped
networks:
- nextcloud_network
# # Collabora Online Office
# collabora:
# image: collabora/code:latest
# container_name: collabora
# ports:
# - 9980:9980
# environment:
# - domain=liphlink.xyz
# - username=liph
# - password=1ChagecolL
# - extra_params=--o:ssl.enable=false
# restart: unless-stopped
# networks:
# - nextcloud_network
# OnlyOffice (alternative to Collabora)
onlyoffice:
image: docker.io/onlyoffice/documentserver:latest
container_name: onlyoffice
ports:
- 8000:80
environment:
- JWT_SECRET=joJVOvGpHYCahLnEeAShgm78r58VJl4C
volumes:
- /mnt/flash1/podman/nextcloud/config/onlyoffice:/var/www/onlyoffice/Data
restart: unless-stopped
networks:
- nextcloud_network
obsidian:
image: lscr.io/linuxserver/obsidian:latest
container_name: obsidian
security_opt:
- seccomp:unconfined #optional
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
volumes:
- /mnt/flash1/podman/nextcloud/config/obsidian:/config:z
- /mnt/flash1/podman/nextcloud/config/vaults:/vault:z
ports:
- 3004:3000
- 3003:3001
shm_size: "1gb"
restart: unless-stopped
# # Talk (Video Conferencing)
# talk:
# image: jvillafa/turn:latest
# container_name: nextcloud_talk
# ports:
# - 3478:3478/udp
# - 3478:3478/tcp
# - 65000-65535:65000-65535/udp
# environment:
# - TURN_SECRET=yourturnsecret
# - NEXTCLOUD_TALK_IP=your_server_ip
# restart: unless-stopped
# networks:
# - nextcloud_network
# # Full-Text Search (optional)
# fulltextsearch:
# image: nextcloud/fulltextsearch:latest
# container_name: nextcloud_fulltext
# depends_on:
# - nextcloud
# environment:
# - NEXTCLOUD_URL=http://nextcloud
# volumes:
# - nextcloud_data:/var/www/html
# restart: unless-stopped
# networks:
# - nextcloud_network
# # Cron for background jobs
# cron:
# image: nextcloud:latest
# container_name: nextcloud_cron
# depends_on:
# - nextcloud
# volumes:
# - nextcloud_data:/var/www/html
# - nextcloud_config:/var/www/html/config
# - nextcloud_apps:/var/www/html/custom_apps
# entrypoint: /cron.sh
# restart: unless-stopped
# networks:
# - nextcloud_network
volumes:
pg_data:
redis_data:
nextcloud_data:
nextcloud_config:
nextcloud_apps:
onlyoffice_data:
networks:
nextcloud_network:
name: nextcloud_network

View File

7
podman/nocodb/.env Executable file
View File

@@ -0,0 +1,7 @@
# .env file
NC_DB=pg://postgres:5432?u=nocodb&p=LFeVBxYxjlk959sXXNKK6jx4hZmELdZDLQU3sN+m+04=&d=nocodb
NC_AUTH_JWT_SECRET=9D3V0TFT6YWHT8s3csTHm6KQuf3o74m4bJ3UQfI/e1w=
NC_PUBLIC_URL=http://100.111.222.33:8066
POSTGRES_USER=postgres
POSTGRES_PASSWORD=LFeVBxYxjlk959sXXNKK6jx4hZmELdZDLQU3sN+m+04=
POSTGRES_DB=nocodb

View File

@@ -0,0 +1,40 @@
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and
# ignored (with a warning) by recent docker/podman compose.
version: '3.8'
services:
# --- NocoDB + Postgres + Redis ---
nocodb:
image: docker.io/nocodb/nocodb:latest
restart: unless-stopped
container_name: nocodb
depends_on:
- nocodb-db
- nocodb-redis
environment:
# Point NocoDB at Postgres. Change passwords before using in prod.
NC_DB: pg://nocodb-db:5432?u=noco&p=noco_pass&d=nocodb
# Required for auth sessions/tokens — use a long random string.
NC_AUTH_JWT_SECRET: "change-me-super-random"
ports:
- "8077:8080"
volumes:
- /mnt/flash1/podman/nocodb/data/nocodb:/usr/app/data
# Postgres backing database for NocoDB; credentials must match NC_DB above.
nocodb-db:
image: docker.io/postgres:15
restart: unless-stopped
container_name: nocodb-post
environment:
POSTGRES_USER: noco
POSTGRES_PASSWORD: noco_pass
POSTGRES_DB: nocodb
volumes:
- /mnt/flash1/podman/nocodb/data/db:/var/lib/postgresql/data
# NOTE(review): redis is started and listed in depends_on, but no
# NC_REDIS_URL (or similar) is set on the nocodb service in this file —
# confirm NocoDB is actually configured to use it.
nocodb-redis:
image: docker.io/redis:7
restart: unless-stopped
container_name: nocodb-redis
#volumes:
# nocodb_data:
# nocodb_db:

View File

@@ -0,0 +1,23 @@
version: "3"

services:
  # Gitea self-hosted git service: web UI on 3000, SSH on host port 222.
  gitea:
    image: docker.io/gitea/gitea:latest
    container_name: gitea
    restart: always
    environment:
      # Run as the main host user/group to avoid file-ownership issues.
      - USER_UID=1000
      - USER_GID=1000
    ports:
      - "3000:3000"
      - "222:22"
    volumes:
      - /mnt/flash1/podman/programming/config/gitea:/data
      # Mirror the host clock/timezone read-only.
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    networks:
      - gitea

networks:
  gitea:
    external: false

24
podman/servarr/.env Executable file
View File

@@ -0,0 +1,24 @@
# General UID/GID and timezone
# FIX: IANA timezones are Region/City — "Berlin/Europe" is not a valid zone
# (the other compose files in this commit correctly use Europe/Berlin).
TZ=Europe/Berlin
PUID=1000
PGID=1000
# Input your VPN provider and type here
VPN_SERVICE_PROVIDER=protonvpn
VPN_TYPE=wireguard
#WIREGUARD_ENDPOINT_PORT=51820
#WIREGUARD_ENDPOINT_IP=79.127.184.216
#VPN_DNS_ADDRESS=10.2.0.1
# Copy all these variables from your generated configuration file
#WIREGUARD_PUBLIC_KEY=snSASVcKZegpITPNw2scm44NBC6NPUropoTkfEGtq18=
# NOTE(review): this private key is committed to version control — rotate it
# and keep the real value out of the repo.
WIREGUARD_PRIVATE_KEY=aEdH/9nZ0zcvOhAIfI7N/+RlcFqQuelV8nmN4R50hG4=
#WIREGUARD_ADDRESSES=10.2.0.2/32
SERVER_COUNTRIES=Switzerland
# Health check duration
HEALTH_VPN_DURATION_INITIAL=120s
PORT_FORWARD_ONLY=on
VPN_PORT_FORWARDING=on
# Pushes the forwarded port into qBittorrent via its WebUI API on 8099.
VPN_PORT_FORWARDING_UP_COMMAND=/bin/sh -c 'wget -O- --retry-connrefused --post-data "json={\"listen_port\":{{PORTS}}}" http://127.0.0.1:8099/api/v2/app/setPreferences 2>&1'

178
podman/servarr/docker-compose.yml Executable file
View File

@@ -0,0 +1,178 @@
# Compose file for the *arr stack. Configuration files are stored in the
# directory you launch the compose file from. Change to bind mounts if needed.
# All containers are run with the user and group IDs of the main user and
# group to avoid permission issues with downloaded files; please refer to
# the readme file for more information.
#############################################################################
# NOTICE: We recently switched to using a .env file. PLEASE refer to the docs.
# https://github.com/TechHutTV/homelab/tree/main/media#docker-compose-and-env
#############################################################################
# Dedicated bridge network; the fixed subnet lets services below pin static
# IPv4 addresses (gluetun .2, radarr .4, sonarr .5, flaresolverr .6).
networks:
servarrnetwork:
name: servarrnetwork
ipam:
config:
- subnet: 172.39.0.0/24
# NOTE(review): `version` is obsolete in Compose v2 and its placement after
# `networks` is unusual — consider removing it.
version: '3.8'
services:
# # airvpn recommended (referral url: https://airvpn.org/?referred_by=673908)
# VPN gateway container. Services using `network_mode: service:gluetun`
# (qbit, prowlarr) share its network namespace, so THEIR ports must be
# published here, not on the services themselves.
gluetun:
image: ghcr.io/qdm12/gluetun:latest
container_name: gluetun
cap_add:
# Needed to create/manage the WireGuard tunnel.
- NET_ADMIN
devices:
- /dev/net/tun:/dev/net/tun # If running on an LXC see readme for more info.
networks:
servarrnetwork:
ipv4_address: 172.39.0.2
ports:
- 8888:8888/tcp # HTTP proxy
- 8388:8388/tcp # Shadowsocks
- 8388:8388/udp # Shadowsocks
- 8099:8099 # qbittorrent web interface
- 6881:6881 # qbittorrent torrent port
- 6789:6789 # nzbget
- 9696:9696 # prowlarr
# - 9091:9091 #transmission
# - 51413:51413/udp #transmission
# - 51413:51413 #transmission
volumes:
- /mnt/flash1/podman/servarr/config/gluetun:/gluetun
# Make a '.env' file in the same directory.
# Credentials and provider settings (VPN_SERVICE_PROVIDER, WIREGUARD_* ...)
# come from the sibling .env file.
env_file:
- .env
# Dependents wait for this healthcheck via `condition: service_healthy`.
healthcheck:
test: ping -c 1 www.google.com || exit 1
interval: 20s
timeout: 10s
retries: 5
restart: unless-stopped
# qBittorrent, tunneled through gluetun (shares its network namespace; the
# 8099 WebUI is reachable through gluetun's port mapping above).
qbit:
image: lscr.io/linuxserver/qbittorrent:latest
container_name: qbit
restart: unless-stopped
labels:
# Picked up by a deunhealth watcher to restart this container when unhealthy.
- deunhealth.restart.on.unhealthy=true
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
- WEBUI_PORT=8099 # must match "qbittorrent web interface" port number in gluetun's service above
volumes:
- /mnt/flash1/podman/servarr/config/qbittorrent:/config
- /mnt/tank/:/data
- /mnt/tank/downloads/torrent/auto:/auto
depends_on:
gluetun:
condition: service_healthy
# Compose long-form depends_on option: restart qbit when gluetun restarts.
restart: true
network_mode: service:gluetun
# Ping-based check doubles as a "VPN tunnel is up" probe.
healthcheck:
test: ping -c 1 www.google.com || exit 1
interval: 60s
retries: 3
start_period: 20s
timeout: 10s
# Prowlarr indexer manager, tunneled through gluetun (port 9696 is published
# on the gluetun service above).
prowlarr:
image: lscr.io/linuxserver/prowlarr:latest
container_name: prowlarr
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
volumes:
- /etc/localtime:/etc/localtime:ro
- /mnt/flash1/podman/servarr/config/prowlarr:/config
restart: unless-stopped
depends_on:
# NOTE(review): commented-out dependency left inside the depends_on mapping;
# remove once it is clear flaresolverr should not gate prowlarr startup.
# flaresolverr:
gluetun:
condition: service_healthy
restart: true
network_mode: service:gluetun
# FlareSolverr: headless-browser proxy used by indexers behind Cloudflare.
# Runs on the servarr network directly (NOT through the VPN tunnel).
flaresolverr:
  container_name: flaresolverr
  image: ghcr.io/flaresolverr/flaresolverr:latest
  restart: unless-stopped
  environment:
    - LOG_LEVEL=info # Can be changed to 'debug' for troubleshooting
    - LOG_HTML=false # Set to 'true' if you need HTML logging (warning: verbose)
    - CAPTCHA_SOLVER=none # Options: none, hcaptcha-solver
    - TZ=${TZ} # Change to your timezone
  ports:
    - "8191:8191" # FlareSolverr API port
  networks:
    servarrnetwork:
      ipv4_address: 172.39.0.6
# Sonarr (TV series management) with a pinned address on the servarr network.
sonarr:
  container_name: sonarr
  image: lscr.io/linuxserver/sonarr:latest
  restart: unless-stopped
  environment:
    - PUID=${PUID}
    - PGID=${PGID}
    - TZ=${TZ}
  ports:
    - "8989:8989"
  networks:
    servarrnetwork:
      ipv4_address: 172.39.0.5
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /mnt/flash1/podman/servarr/config/sonarr:/config
    # :z applies an SELinux shared-content label to the media mount.
    - /mnt/tank/:/data:z
# Radarr (movie management) with a pinned address on the servarr network.
radarr:
  container_name: radarr
  image: lscr.io/linuxserver/radarr:latest
  restart: unless-stopped
  environment:
    - PUID=${PUID}
    - PGID=${PGID}
    - TZ=${TZ}
  ports:
    - "7878:7878"
  networks:
    servarrnetwork:
      ipv4_address: 172.39.0.4
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /mnt/flash1/podman/servarr/config/radarr:/config
    # NOTE(review): sonarr mounts /mnt/tank/ with :z but radarr does not —
    # confirm whether the SELinux label is needed here too.
    - /mnt/tank/:/data
# transmission:
# image: ghcr.io/linuxserver/transmission
# container_name: transmission
# environment:
# - PUID=${PUID}
# - PGID=${PGID}
# - TZ=${TZ}
# # ports:
# # - 9091:9091 # UI Port
# # - 51413:51413
# # - 51413:51413/udp
# network_mode: service:gluetun
# volumes:
# - /mnt/ssd2/podman/servarr/config/transmission:/config
# - /mnt/tank/downloads/torrent:/downloads
# - /home/Downloads/Transmission_watch:/watch
# depends_on:
# gluetun:
# condition: service_healthy
# restart: true
# healthcheck:
# test: ping -c 1 www.google.com || exit 1
# interval: 60s
# retries: 3
# start_period: 20s
# timeout: 10s