Unraid Full Backup Script (Docker, MySQL + PostgreSQL DBs, VMs + Configs)
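A minimal way to schedule the script (an assumption, not part of the gist itself): save it to persistent storage, e.g. /boot/config/scripts/unraid-backup.sh, make it executable with chmod +x, and run it nightly via the User Scripts plugin or a cron entry such as:

# hypothetical schedule: run the backup every night at 03:00
0 3 * * * bash /boot/config/scripts/unraid-backup.sh

Set WEBHOOK_URL, the container names, and the MySQL credentials in the configuration block before the first run.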
#!/bin/bash

# === CONFIGURATION ===

# Paths
APPDATA_PATH="/mnt/user/appdata"
BACKUP_BASE_PATH="/mnt/user/backup"
POOLBACKUP_PATH="/mnt/user/poolbackup/masterbackup"
POOLBACKUP_VMS_PATH="/mnt/user/poolbackup/vms"
DOCKER_COMPOSE_PATH="/mnt/user/appdata"
VM_DISKS_PATH="/mnt/user/domains"
TEMP_PATH="/mnt/user/data/temp"
BACKUP_DATE=$(date +%Y-%m-%d_%H-%M-%S)
BACKUP_PATH="${BACKUP_BASE_PATH}/${BACKUP_DATE}"

# Webhook URL (Discord, Slack, etc.)
WEBHOOK_URL="URL"

# PostgreSQL configuration
POSTGRES_CONTAINER="postgresql17"

# MySQL configuration
MYSQL_CONTAINER="MySQL"
MYSQL_USER="USER"
MYSQL_PASSWORD="PW"

# Options
KEEP_LOCAL_BACKUPS=10   # Number of backups to keep on /mnt/user/backup
KEEP_POOL_BACKUPS=180   # Maximum number of backups to keep on the pool
MIN_FREE_SPACE_GB=10    # Always leave at least 10 GB free

LOG_DIR="${BACKUP_BASE_PATH}/logs"
LOG_FILE="${LOG_DIR}/backup_${BACKUP_DATE}.log"
# === FUNCTIONS ===

# Send a webhook notification
send_webhook() {
    local message="$1"
    local timestamp=$(date "+%Y-%m-%d %H:%M:%S")
    curl -H "Content-Type: application/json" \
        -X POST \
        -d "{\"content\": \"[${timestamp}] ${message}\"}" \
        "${WEBHOOK_URL}" 2>/dev/null
}

# Logging
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "${LOG_FILE}"
}
# Create backup directories
create_backup_dirs() {
    log "Creating backup directories..."
    mkdir -p "${BACKUP_PATH}/appdata"
    mkdir -p "${BACKUP_PATH}/databases/postgresql"
    mkdir -p "${BACKUP_PATH}/databases/mysql"
    mkdir -p "${BACKUP_PATH}/docker-configs"
    mkdir -p "${BACKUP_PATH}/vm-configs"
    mkdir -p "${LOG_DIR}"
    mkdir -p "${TEMP_PATH}"
    mkdir -p "${POOLBACKUP_VMS_PATH}"
    log "✓ Directories created"
}
# AppData backup - compress every folder individually
backup_appdata() {
    log "Starting AppData backup..."
    send_webhook "Backup started: backing up AppData..."
    local count=0
    local failed=0

    if [ ! -d "${APPDATA_PATH}" ]; then
        log "ERROR: AppData path ${APPDATA_PATH} does not exist!"
        send_webhook "✖ ERROR: AppData path not found!"
        return 1
    fi
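    # NOTE: containers are not stopped before archiving, so files that change
    # while tar runs (e.g. live database files inside appdata) may be captured
    # in an inconsistent state.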
    for folder in "${APPDATA_PATH}"/*; do
        if [ -d "$folder" ]; then
            local folder_name=$(basename "$folder")
            log "Compressing: ${folder_name}..."
            if tar -czf "${BACKUP_PATH}/appdata/${folder_name}.tar.gz" -C "${APPDATA_PATH}" "${folder_name}" 2>>"${LOG_FILE}"; then
                local size=$(du -h "${BACKUP_PATH}/appdata/${folder_name}.tar.gz" | cut -f1)
                log "✓ ${folder_name} backed up successfully (${size})"
                ((count++))
            else
                log "✗ ERROR while backing up ${folder_name}"
                ((failed++))
            fi
        fi
    done

    log "AppData backup finished: ${count} succeeded, ${failed} failed"
    send_webhook "✓ AppData backup finished: ${count} folders backed up, ${failed} errors"
}
# PostgreSQL backup - copy the database files directly
backup_postgresql() {
    log "Starting PostgreSQL backup (file-based)..."
    send_webhook "Backing up PostgreSQL database files..."

    # Check whether the container is running
    if ! docker ps --format '{{.Names}}' | grep -q "^${POSTGRES_CONTAINER}$"; then
        log "WARNING: PostgreSQL container '${POSTGRES_CONTAINER}' is not running!"
        send_webhook "! PostgreSQL container not found!"
        return 1
    fi
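    # NOTE: this archives the data directory of a running server, so the copy
    # may not be crash-consistent. A logical dump is the safer alternative,
    # e.g. (assuming the default "postgres" superuser):
    #   docker exec "${POSTGRES_CONTAINER}" pg_dumpall -U postgres > dump.sql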
    # Find the container's data volume
    local pg_data_path=$(docker inspect "${POSTGRES_CONTAINER}" --format='{{range .Mounts}}{{if eq .Destination "/var/lib/postgresql/data"}}{{.Source}}{{end}}{{end}}' 2>/dev/null)

    if [ -z "${pg_data_path}" ]; then
        log "WARNING: PostgreSQL data volume not found, trying alternative paths..."
        # Try alternative mount points
        pg_data_path=$(docker inspect "${POSTGRES_CONTAINER}" --format='{{range .Mounts}}{{.Source}}:{{.Destination}}|{{end}}' 2>/dev/null | tr '|' '\n' | grep -i postgres | head -1 | cut -d: -f1)
    fi

    if [ -n "${pg_data_path}" ] && [ -d "${pg_data_path}" ]; then
        log "PostgreSQL data found: ${pg_data_path}"
        # Compress the entire data directory
        if tar -czf "${BACKUP_PATH}/databases/postgresql/postgresql_data.tar.gz" -C "$(dirname "${pg_data_path}")" "$(basename "${pg_data_path}")" 2>>"${LOG_FILE}"; then
            local size=$(du -h "${BACKUP_PATH}/databases/postgresql/postgresql_data.tar.gz" | cut -f1)
            log "✓ PostgreSQL database files backed up successfully (${size})"
            send_webhook "✓ PostgreSQL: data backed up (${size})"
        else
            log "✗ ERROR while backing up the PostgreSQL files"
            send_webhook "✖ ERROR during PostgreSQL backup"
        fi
    else
        log "✗ PostgreSQL data directory not found!"
        send_webhook "! PostgreSQL data directory not found"
        return 1
    fi
}
# Back up Docker configurations
backup_docker_configs() {
    log "Starting Docker configuration backup..."
    send_webhook "Backing up Docker configurations..."
    local count=0

    # Create target directories
    mkdir -p "${BACKUP_PATH}/docker-configs/containers"
    mkdir -p "${BACKUP_PATH}/docker-configs/compose"

    # 1. Find and back up docker-compose files (only relevant ones, no node_modules)
    log "Searching for docker-compose files..."
    while IFS= read -r compose_file; do
        local relative_path=$(echo "${compose_file}" | sed "s|${DOCKER_COMPOSE_PATH}/||")
        local target_dir=$(dirname "${BACKUP_PATH}/docker-configs/compose/${relative_path}")
        mkdir -p "${target_dir}"
        if cp "${compose_file}" "${target_dir}/" 2>/dev/null; then
            log "✓ Backed up: ${relative_path}"
            ((count++))
        fi
    done < <(find "${DOCKER_COMPOSE_PATH}" -type f \( -name "docker-compose.yml" -o -name "docker-compose.yaml" \) \
        ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/.github/*" 2>/dev/null)

    # 2. Export configurations of running containers
    log "Exporting container configurations..."
    while IFS= read -r container_name; do
        log "Exporting config: ${container_name}..."
        # Save docker inspect output as JSON
        docker inspect "${container_name}" > "${BACKUP_PATH}/docker-configs/containers/${container_name}_inspect.json" 2>/dev/null
        # Reconstruct the docker run command (simplified: volume mappings only)
        docker inspect "${container_name}" --format='{{range .Mounts}}--volume {{.Source}}:{{.Destination}} {{end}}' > "${BACKUP_PATH}/docker-configs/containers/${container_name}_volumes.txt" 2>/dev/null
        log "✓ ${container_name} config backed up"
        ((count++))
    done < <(docker ps --format '{{.Names}}')

    # 3. docker-compose files referenced by running containers (if labels are present)
    log "Checking container labels for compose info..."
    while IFS= read -r container_name; do
        local compose_file=$(docker inspect "${container_name}" --format='{{index .Config.Labels "com.docker.compose.project.config_files"}}' 2>/dev/null)
        if [ -n "${compose_file}" ] && [ -f "${compose_file}" ]; then
            cp "${compose_file}" "${BACKUP_PATH}/docker-configs/compose/" 2>/dev/null
        fi
    done < <(docker ps --format '{{.Names}}')

    # 4. Unraid template XMLs (if present)
    if [ -d "/boot/config/plugins/dockerMan/templates-user" ]; then
        log "Backing up Unraid Docker templates..."
        cp -r /boot/config/plugins/dockerMan/templates-user "${BACKUP_PATH}/docker-configs/unraid-templates" 2>/dev/null
    fi

    log "Docker config backup finished: ${count} entries backed up"
    send_webhook "✓ Docker configs: ${count} entries backed up"
}
# MySQL backup - create SQL dumps
backup_mysql() {
    log "Starting MySQL backup (SQL dump)..."
    send_webhook "Backing up MySQL databases..."

    # Check whether the container is running
    if ! docker ps --format '{{.Names}}' | grep -q "^${MYSQL_CONTAINER}$"; then
        log "WARNING: MySQL container '${MYSQL_CONTAINER}' is not running!"
        send_webhook "! MySQL container not found!"
        return 1
    fi

    # List all databases (except system DBs)
    local databases=$(docker exec "${MYSQL_CONTAINER}" \
        mysql -u"${MYSQL_USER}" -p"${MYSQL_PASSWORD}" -e "SHOW DATABASES;" 2>>"${LOG_FILE}" | \
        grep -Ev "^(Database|information_schema|performance_schema|mysql|sys)$")

    if [ -z "$databases" ]; then
        log "No MySQL databases found"
        return 0
    fi

    local count=0
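    # NOTE: --single-transaction below yields a consistent snapshot for InnoDB
    # tables only; non-transactional engines (e.g. MyISAM) are not locked here.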
    for db in $databases; do
        log "Backing up MySQL DB: ${db}..."
        if docker exec "${MYSQL_CONTAINER}" \
            mysqldump -u"${MYSQL_USER}" -p"${MYSQL_PASSWORD}" --single-transaction --quick "${db}" \
            > "${BACKUP_PATH}/databases/mysql/${db}.sql" 2>>"${LOG_FILE}"; then
            # Compress
            gzip "${BACKUP_PATH}/databases/mysql/${db}.sql"
            local size=$(du -h "${BACKUP_PATH}/databases/mysql/${db}.sql.gz" | cut -f1)
            log "✓ ${db} backed up successfully (${size})"
            ((count++))
        else
            log "✗ ERROR while backing up MySQL DB: ${db}"
        fi
    done

    log "MySQL backup finished: ${count} databases backed up"
    send_webhook "✓ MySQL: ${count} databases backed up"
}
# Back up VM configurations
backup_vm_configs() {
    log "Starting VM configuration backup..."
    send_webhook "Backing up VM configurations..."
    local count=0

    # 1. Back up libvirt XML configs
    if command -v virsh &> /dev/null; then
        log "Exporting VM definitions via virsh..."
        while IFS= read -r vm_name; do
            if [ -n "${vm_name}" ]; then
                log "Backing up VM config: ${vm_name}..."
                if virsh dumpxml "${vm_name}" > "${BACKUP_PATH}/vm-configs/${vm_name}.xml" 2>>"${LOG_FILE}"; then
                    log "✓ ${vm_name}.xml backed up"
                    ((count++))
                fi
            fi
        done < <(virsh list --all --name 2>/dev/null)
    fi

    # 2. Back up OVMF NVRAM files (UEFI VMs)
    if [ -d "/etc/libvirt/qemu/nvram" ]; then
        log "Backing up OVMF NVRAM files..."
        cp -r /etc/libvirt/qemu/nvram "${BACKUP_PATH}/vm-configs/" 2>>"${LOG_FILE}"
    fi

    # 3. Unraid VM manager config (if present); domain.cfg is a file, so test with -f
    if [ -f "/boot/config/domain.cfg" ]; then
        log "Backing up Unraid VM manager config..."
        cp /boot/config/domain.cfg "${BACKUP_PATH}/vm-configs/" 2>>"${LOG_FILE}"
    fi

    log "VM config backup finished: ${count} VMs backed up"
    send_webhook "✓ VM configs: ${count} VMs backed up"
}
# Back up VM disks (separately, to the pool)
backup_vm_disks() {
    log "Starting VM disk backup (to pool)..."
    send_webhook "Backing up VM disks to pool..."

    if [ ! -d "${VM_DISKS_PATH}" ]; then
        log "WARNING: VM disks path ${VM_DISKS_PATH} does not exist!"
        send_webhook "! VM disks path not found!"
        return 1
    fi
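    # NOTE: disk images are copied while their VMs may still be running, so the
    # copies are not guaranteed to be consistent; shutting the VMs down (or
    # snapshotting them) before the copy would be safer.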
    # First calculate the total size of all VM disks
    log "Calculating required size for VM disks..."

    # Find all VM disks
    local total_vm_size_gb=0
    local disk_count=0
    while IFS= read -r disk_file; do
        if [ -f "${disk_file}" ]; then
            local disk_size_gb=$(du -b "${disk_file}" 2>/dev/null | awk '{printf "%.2f\n", $1/1024/1024/1024}')
            total_vm_size_gb=$(awk "BEGIN {printf \"%.2f\", ${total_vm_size_gb} + ${disk_size_gb}}")
            ((disk_count++))
            log "  Found: ${disk_file} (${disk_size_gb}GB)"
        fi
    done < <(find "${VM_DISKS_PATH}" -type f \( -name "*.img" -o -name "*.qcow2" -o -name "*.raw" -o -name "*.vdi" -o -name "*.vmdk" \) 2>/dev/null)

    if [ ${disk_count} -eq 0 ]; then
        log "No VM disks found in ${VM_DISKS_PATH}"
        send_webhook "⚠️ No VM disks found"
        return 0
    fi

    log "Total size of VM disks: ${total_vm_size_gb}GB (${disk_count} disks found)"

    # Check free space and clean up if necessary
    local free_space=$(get_free_space_gb "${POOLBACKUP_VMS_PATH}")
    local required_space=$(echo "${total_vm_size_gb} ${MIN_FREE_SPACE_GB}" | awk '{printf "%.2f\n", $1 + $2}')
    log "Checking space for VM disks: ${free_space}GB available, ${required_space}GB required (${total_vm_size_gb}GB disks + ${MIN_FREE_SPACE_GB}GB buffer)"

    # Compare as integers (convert to MB for a more precise comparison)
    local free_mb=$(echo "${free_space}" | awk '{printf "%.0f\n", $1 * 1024}')
    local required_mb=$(echo "${required_space}" | awk '{printf "%.0f\n", $1 * 1024}')

    if [ ${free_mb} -lt ${required_mb} ]; then
        log "Not enough space (${free_mb}MB < ${required_mb}MB), cleaning up old VM disk backups..."
        cleanup_vm_disk_backups "${required_space}"
    else
        log "✓ Enough free space available (${free_mb}MB >= ${required_mb}MB)"
    fi

    local count=0
    local total_size=0
    local vm_backup_path="${POOLBACKUP_VMS_PATH}/${BACKUP_DATE}"
    mkdir -p "${vm_backup_path}"

    # Find all VM disk images
    while IFS= read -r disk_file; do
        local vm_name=$(basename "$(dirname "${disk_file}")")
        local disk_name=$(basename "${disk_file}")
        local disk_size=$(du -h "${disk_file}" | cut -f1)
        log "Backing up VM disk: ${vm_name}/${disk_name} (${disk_size})..."
        send_webhook "  Backing up VM disk: ${vm_name}/${disk_name} (${disk_size})..."

        # Create a VM-specific directory
        mkdir -p "${vm_backup_path}/${vm_name}"

        # Copy via temp (SSD) for better performance
        local temp_file="${TEMP_PATH}/${disk_name}"
        if cp "${disk_file}" "${temp_file}" 2>>"${LOG_FILE}"; then
            log "✓ Copied to temp, moving to pool..."
            if mv "${temp_file}" "${vm_backup_path}/${vm_name}/" 2>>"${LOG_FILE}"; then
                log "✓ ${vm_name}/${disk_name} backed up to pool successfully"
                ((count++))
            else
                log "✗ ERROR while moving to pool"
                rm -f "${temp_file}" 2>/dev/null
            fi
        else
            log "✗ ERROR while copying ${disk_name}"
        fi
    done < <(find "${VM_DISKS_PATH}" -type f \( -name "*.img" -o -name "*.qcow2" -o -name "*.raw" -o -name "*.vdi" -o -name "*.vmdk" \) 2>/dev/null)

    # Final cleanup: keep at most 5 VM disk backups
    cleanup_vm_disk_backups

    local final_size=$(du -sh "${vm_backup_path}" 2>/dev/null | cut -f1)
    log "VM disk backup finished: ${count} disks backed up (${final_size})"
    send_webhook "✓ VM disks: ${count} disks backed up to pool (${final_size})"
}
# Clean up VM disk backups
cleanup_vm_disk_backups() {
    local required_space="${1:-0}"
    log "Cleaning up VM disk backups..."

    local vm_backup_count=$(ls -1dt "${POOLBACKUP_VMS_PATH}"/[0-9]*/ 2>/dev/null | wc -l)
    if [ ${vm_backup_count} -gt 5 ]; then
        log "Deleting old VM disk backups (${vm_backup_count} > 5)..."
        ls -1dt "${POOLBACKUP_VMS_PATH}"/[0-9]*/ | tail -n +6 | while read old_vm_backup; do
            local backup_size=$(du -sh "${old_vm_backup}" | cut -f1)
            log "Deleting old VM disk backup: $(basename "${old_vm_backup}") (${backup_size})"
            rm -rf "${old_vm_backup}"
        done
    fi

    # If a required amount of space was given, check whether enough is free
    if [ "${required_space}" != "0" ]; then
        local free_space=$(get_free_space_gb "${POOLBACKUP_VMS_PATH}")
        local free_mb=$(echo "${free_space}" | awk '{printf "%.0f\n", $1 * 1024}')
        local required_mb=$(echo "${required_space}" | awk '{printf "%.0f\n", $1 * 1024}')

        # Delete further old backups if necessary
        while [ ${free_mb} -lt ${required_mb} ]; do
            local oldest=$(ls -1dt "${POOLBACKUP_VMS_PATH}"/[0-9]*/ 2>/dev/null | tail -1)
            if [ -z "${oldest}" ]; then
                log "! No more VM disk backups left to delete"
                return 1
            fi
            local backup_size=$(du -sh "${oldest}" | cut -f1)
            log "Deleting VM disk backup to free space: $(basename "${oldest}") (${backup_size})"
            rm -rf "${oldest}"
            free_space=$(get_free_space_gb "${POOLBACKUP_VMS_PATH}")
            free_mb=$(echo "${free_space}" | awk '{printf "%.0f\n", $1 * 1024}')
            log "New free space: ${free_space}GB (${free_mb}MB)"
        done
        log "✓ Enough space freed: ${free_space}GB available"
    fi
}
# Helpers for space checks and backup cleanup
get_free_space_gb() {
    local path="$1"
    df -BG "${path}" | awk 'NR==2 {print $4}' | sed 's/G//'
}
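# NOTE: the helper below is currently not called anywhere in the script.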
get_dir_size_gb() {
    local path="$1"
    if [ -e "${path}" ]; then
        du -sb "${path}" | awk '{printf "%.2f\n", $1/1024/1024/1024}'
    else
        echo "0"
    fi
}
# Clean up local backups (on /mnt/user/backup)
cleanup_local_backups() {
    log "Cleaning up local backups (keeping the last ${KEEP_LOCAL_BACKUPS})..."

    # Delete backup directories
    local backup_count=$(ls -1dt "${BACKUP_BASE_PATH}"/[0-9]*/ 2>/dev/null | wc -l)
    if [ ${backup_count} -gt ${KEEP_LOCAL_BACKUPS} ]; then
        ls -1dt "${BACKUP_BASE_PATH}"/[0-9]*/ | tail -n +$((KEEP_LOCAL_BACKUPS + 1)) | while read old_backup; do
            log "Deleting local backup directory: $(basename "${old_backup}")"
            rm -rf "${old_backup}"
        done
        log "✓ Old local backup directories deleted"
    else
        log "No old local backup directories to delete (${backup_count}/${KEEP_LOCAL_BACKUPS})"
    fi

    # Delete master archives (keep the same number)
    local archive_count=$(ls -1t "${BACKUP_BASE_PATH}"/master_*.tar.gz 2>/dev/null | wc -l)
    if [ ${archive_count} -gt ${KEEP_LOCAL_BACKUPS} ]; then
        ls -1t "${BACKUP_BASE_PATH}"/master_*.tar.gz | tail -n +$((KEEP_LOCAL_BACKUPS + 1)) | while read old_archive; do
            log "Deleting local master archive: $(basename "${old_archive}")"
            rm -f "${old_archive}"
        done
        log "✓ Old local master archives deleted"
    else
        log "No old local master archives to delete (${archive_count}/${KEEP_LOCAL_BACKUPS})"
    fi
}
# Clean up pool backups based on available space
cleanup_pool_backups() {
    local required_space_gb="$1"
    log "Checking free space on the pool backup..."

    # First check whether the maximum number has been reached
    local current_pool_count=$(ls -1 "${POOLBACKUP_PATH}"/master_*.tar.gz 2>/dev/null | wc -l)
    if [ ${current_pool_count} -ge ${KEEP_POOL_BACKUPS} ]; then
        log "Maximum number of pool backups reached (${current_pool_count}/${KEEP_POOL_BACKUPS}), deleting the oldest..."
        local to_delete=$((current_pool_count - KEEP_POOL_BACKUPS + 1))
        ls -1t "${POOLBACKUP_PATH}"/master_*.tar.gz | tail -n ${to_delete} | while read old_backup; do
            log "Deleting pool backup (maximum reached): $(basename "${old_backup}")"
            rm -f "${old_backup}"
        done
    fi

    local free_space=$(get_free_space_gb "${POOLBACKUP_PATH}")
    # Use awk instead of bc for the calculation
    local needed_space=$(awk "BEGIN {printf \"%.0f\", ${required_space_gb} + ${MIN_FREE_SPACE_GB}}")

    log "Available space: ${free_space}GB, required: ${needed_space}GB"

    if [ ${free_space} -ge ${needed_space} ]; then
        log "✓ Enough free space available"
        return 0
    fi

    log "! Not enough space, deleting old pool backups..."
    send_webhook "! Freeing up space (required: ${needed_space}GB, available: ${free_space}GB)..."

    # Delete old backups (oldest first) until enough space is free
    ls -1tr "${POOLBACKUP_PATH}"/master_*.tar.gz 2>/dev/null | while read old_backup; do
        free_space=$(get_free_space_gb "${POOLBACKUP_PATH}")
        if [ ${free_space} -ge ${needed_space} ]; then
            log "✓ Enough space freed"
            break
        fi
        local backup_size=$(du -h "${old_backup}" | cut -f1)
        log "Deleting pool backup: $(basename "${old_backup}") (${backup_size})"
        rm -f "${old_backup}"
    done

    # Final check
    free_space=$(get_free_space_gb "${POOLBACKUP_PATH}")
    if [ ${free_space} -ge ${needed_space} ]; then
        log "✓ Space freed successfully"
        return 0
    else
        log "✖ ERROR: unable to free enough space!"
        send_webhook "✖ ERROR: not enough free space available!"
        return 1
    fi
}
# Create the master backup and copy it to the pool
create_master_backup() {
    log "Creating master backup archive..."
    send_webhook "Creating master backup archive..."

    local master_filename="master_${BACKUP_DATE}.tar.gz"
    local master_path="${BACKUP_BASE_PATH}/${master_filename}"

    # Save statistics BEFORE the directory gets deleted
    export BACKUP_SIZE=$(du -sh "${BACKUP_PATH}" 2>/dev/null | cut -f1)
    export APPDATA_COUNT=$(ls -1 "${BACKUP_PATH}/appdata/" 2>/dev/null | wc -l)
    export PG_COUNT=$(ls -1 "${BACKUP_PATH}/databases/postgresql/" 2>/dev/null | wc -l)
    export MYSQL_COUNT=$(ls -1 "${BACKUP_PATH}/databases/mysql/" 2>/dev/null | wc -l)
    export DOCKER_COUNT=$(find "${BACKUP_PATH}/docker-configs/" -type f 2>/dev/null | wc -l)
    export VM_CONFIG_COUNT=$(ls -1 "${BACKUP_PATH}/vm-configs/"*.xml 2>/dev/null | wc -l)

    # Compress the entire backup directory (log stderr; do not pipe through tee,
    # which would mask tar's exit status)
    if tar -czf "${master_path}" -C "${BACKUP_BASE_PATH}" "${BACKUP_DATE}" 2>>"${LOG_FILE}"; then
        local master_size=$(du -h "${master_path}" | cut -f1)
        log "✓ Master backup created: ${master_filename} (${master_size})"

        # Size in GB for the space check (wait briefly until the file is fully written)
        sleep 1
        local master_size_gb=$(du -b "${master_path}" | awk '{printf "%.2f\n", $1/1024/1024/1024}')
        log "Master backup size: ${master_size_gb}GB"

        # Make sure the pool directory exists
        mkdir -p "${POOLBACKUP_PATH}"

        # Check free space and clean up if necessary
        if cleanup_pool_backups "${master_size_gb}"; then
            log "Copying master backup to the pool..."
            send_webhook "Copying master backup to the pool..."
            if cp "${master_path}" "${POOLBACKUP_PATH}/"; then
                log "✓ Master backup copied to the pool successfully"
                send_webhook "✓ Master backup stored on the pool (${master_size})"
            else
                log "✗ ERROR while copying to the pool"
                send_webhook "✖ ERROR while copying to the pool"
            fi
        else
            log "✗ Skipping pool copy due to lack of space"
        fi

        # KEEP the master archive locally as a second copy
        log "✓ Master archive kept locally (second copy)"

        # Delete the uncompressed backup directory (keep only the master archive)
        log "Deleting uncompressed backup directory..."
        rm -rf "${BACKUP_PATH}"
        log "✓ Uncompressed directory deleted, only the master archive kept"
    else
        log "✗ ERROR while creating the master backup"
        send_webhook "✖ ERROR while creating the master backup"
        return 1
    fi
}
# Backup statistics
show_statistics() {
    # Use the exported values (set before the backup path was deleted)
    local backup_size="${BACKUP_SIZE:-N/A}"
    local appdata_count="${APPDATA_COUNT:-0}"
    local pg_count="${PG_COUNT:-0}"
    local mysql_count="${MYSQL_COUNT:-0}"
    local docker_count="${DOCKER_COUNT:-0}"
    local vm_config_count="${VM_CONFIG_COUNT:-0}"
    local local_backup_count=$(ls -1dt "${BACKUP_BASE_PATH}"/[0-9]*/ 2>/dev/null | wc -l)
    local local_master_count=$(ls -1 "${BACKUP_BASE_PATH}"/master_*.tar.gz 2>/dev/null | wc -l)
    local pool_backup_count=$(ls -1 "${POOLBACKUP_PATH}"/master_*.tar.gz 2>/dev/null | wc -l)
    local vm_disk_backup_count=$(ls -1dt "${POOLBACKUP_VMS_PATH}"/[0-9]*/ 2>/dev/null | wc -l)
    local free_local=$(get_free_space_gb "${BACKUP_BASE_PATH}")
    local free_pool=$(get_free_space_gb "${POOLBACKUP_PATH}")

    log "=== BACKUP STATISTICS ==="
    log "Backup path: ${BACKUP_PATH} (deleted, only the master archive was kept)"
    log "Local size: ${backup_size}"
    log "AppData folders: ${appdata_count}"
    log "Docker configs: ${docker_count}"
    log "VM configs: ${vm_config_count}"
    log "PostgreSQL DBs: ${pg_count}"
    log "MySQL DBs: ${mysql_count}"
    log "---"
    log "Local directories: ${local_backup_count}"
    log "Local master archives: ${local_master_count}"
    log "Pool master archives: ${pool_backup_count}"
    log "Pool VM disk backups: ${vm_disk_backup_count}"
    log "Free space (local): ${free_local}GB"
    log "Free space (pool): ${free_pool}GB"
    log "========================="
    send_webhook "✓ Backup complete!\n• Size: ${backup_size}\n• AppData: ${appdata_count} | Docker: ${docker_count} | VMs: ${vm_config_count}\n• PG: ${pg_count} | MySQL: ${mysql_count}\n• Local: ${local_backup_count} dirs + ${local_master_count} archives (${free_local}GB)\n• Pool: ${pool_backup_count} archives + ${vm_disk_backup_count} VM disk backups (${free_pool}GB)"
}
# === MAIN ===

main() {
    log "================================================"
    log "Starting Unraid backup"
    log "================================================"
    send_webhook "Unraid backup started"

    # Create backup directories
    create_backup_dirs

    # Back up AppData
    backup_appdata

    # Back up Docker configurations
    backup_docker_configs

    # Back up databases
    backup_postgresql
    backup_mysql

    # Back up VM configurations (into the master backup)
    backup_vm_configs

    # Create the master backup and copy it to the pool
    create_master_backup

    # Back up VM disks separately to the pool (after the master backup)
    backup_vm_disks

    # Clean up local backups
    cleanup_local_backups

    # Show statistics
    show_statistics

    log "================================================"
    log "Backup finished successfully!"
    log "================================================"
}

# Run the script
main
exit 0