further implementation

Hoang Nguyen
2026-05-06 11:19:18 +02:00
parent d8d4d1cbe9
commit aca2d6be33
18 changed files with 323 additions and 11 deletions


@@ -0,0 +1,11 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: timescale-backup-daily
  namespace: database
spec:
  schedule: "0 0 2 * * *" # daily at 02:00 (CNPG schedules use a six-field cron spec that includes seconds)
  backupOwnerReference: self
  cluster:
    name: timescale-cluster
  method: barmanObjectStore
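
Before relying on the schedules, the object-store settings can be exercised with a one-off Backup resource. A minimal sketch, assuming an on-demand backup named timescale-backup-manual (the name is only an example, not part of this commit):

apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: timescale-backup-manual   # example name, not part of this commit
  namespace: database
spec:
  cluster:
    name: timescale-cluster
  method: barmanObjectStore

If this completes successfully, the hourly and daily ScheduledBackups run against the same barmanObjectStore configuration.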


@@ -0,0 +1,12 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: timescale-backup-hourly
  namespace: database
spec:
  schedule: "0 0 * * * *" # every hour (six-field cron spec, seconds first)
  backupOwnerReference: self
  cluster:
    name: timescale-cluster
  method: barmanObjectStore


@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: cnpg-backup-alert
  namespace: database
spec:
  groups:
    - name: cnpg.rules
      rules:
        - alert: CNPGBackupMissing
          expr: time() - cnpg_last_backup_time_seconds > 86400
          for: 10m
          labels:
            severity: critical
          annotations:
            summary: "CNPG Backup missing"
            description: "No backup in last 24h"
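
The expression above assumes a metric named cnpg_last_backup_time_seconds. If only the default CloudNativePG exporter is scraped, the closest built-in metric is cnpg_collector_last_available_backup_timestamp, so the rule may need adjusting; a sketch, to be verified against the metrics actually exposed by the instance pods:

        - alert: CNPGBackupMissing
          expr: time() - cnpg_collector_last_available_backup_timestamp > 86400
          for: 10m
          labels:
            severity: critical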


@@ -4,10 +4,51 @@ metadata:
  name: timescale-cluster
spec:
  instances: 3
  imageName: timescale/timescaledb:2.15.3-pg15
  # ✅ Initial setup
  bootstrap:
    initdb:
      database: app
      owner: app
  # ✅ Storage
  storage:
    size: 50Gi
  # ✅ Resources (important!)
  resources:
    requests:
      memory: "2Gi"
      cpu: "500m"
    limits:
      memory: "4Gi"
      cpu: "2"
  # ✅ High Availability
  affinity:
    enablePodAntiAffinity: true
    topologyKey: kubernetes.io/hostname
  # ✅ Monitoring
  monitoring:
    enablePodMonitor: true
  # ✅ PostgreSQL / Timescale tuning
  postgresql:
    parameters:
      max_connections: "100"        # raise to 200 if more connections are needed
      shared_buffers: "256MB"       # in-RAM cache; consider up to 1GB
      effective_cache_size: "3GB"   # optimizer hint
      work_mem: "16MB"              # per query
      maintenance_work_mem: "256MB" # for VACUUM/REINDEX
      wal_buffers: "16MB"
      checkpoint_completion_target: "0.9"
      random_page_cost: "1.1"       # optimized for SSDs
      effective_io_concurrency: "200"
  backup:
    retentionPolicy: "7d" # backups older than 7 days are deleted automatically
    barmanObjectStore:
      destinationPath: "s3://backups/"
      endpointURL: "http://minio.minio-system.svc.cluster.local:9000"
@@ -18,3 +59,5 @@ spec:
        secretAccessKey:
          name: s3-creds
          key: SECRET_ACCESS_KEY
      wal:
        compression: gzip
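
Note that the unchanged s3Credentials lines above still reference key: SECRET_ACCESS_KEY, while the s3-creds Secret added in this commit defines S3_ACCESS_KEY_ID and S3_SECRET_ACCESS_KEY (the same keys the restore ConfigMap uses). If the new Secret is the intended source, the cluster's credential references would have to match; a sketch, assuming the Secret's key names are authoritative:

      s3Credentials:
        accessKeyId:
          name: s3-creds
          key: S3_ACCESS_KEY_ID       # assuming the new Secret's key names are authoritative
        secretAccessKey:
          name: s3-creds
          key: S3_SECRET_ACCESS_KEY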


@@ -1,2 +1,9 @@
resources:
- cluster.yaml
- s3-secret.yaml
- backup-hourly.yaml
- backup-daily.yaml
- restore-cronjob.yaml
- restore-configmap.yaml
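
The PrometheusRule added in this commit is not listed here. If it is meant to be deployed by this kustomization, its manifest needs an entry as well; a sketch, where backup-alert.yaml is only a placeholder for the actual filename:

resources:
  # ... existing entries ...
  - backup-alert.yaml   # placeholder filename for the PrometheusRule manifest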


@@ -0,0 +1,33 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: restore-test-manifest
  namespace: database
data:
  restore-test.yaml: |
    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: timescale-restore-test
    spec:
      instances: 1
      imageName: timescale/timescaledb:2.15.3-pg15
      bootstrap:
        recovery:
          source: timescale-cluster
          recoveryTarget:
            targetImmediate: true
      storage:
        size: 10Gi
      externalClusters:
        - name: timescale-cluster
          barmanObjectStore:
            destinationPath: "s3://backups/"
            endpointURL: "http://minio.minio-system.svc.cluster.local:9000"
            s3Credentials:
              accessKeyId:
                name: s3-creds
                key: S3_ACCESS_KEY_ID
              secretAccessKey:
                name: s3-creds
                key: S3_SECRET_ACCESS_KEY
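
targetImmediate: true stops recovery at the end of the most recent base backup. If the nightly test should also cover point-in-time recovery from archived WAL, the recoveryTarget can specify a timestamp instead; a sketch with an example timestamp (not taken from this commit):

      bootstrap:
        recovery:
          source: timescale-cluster
          recoveryTarget:
            targetTime: "2026-05-06 10:00:00+02"   # example value only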


@@ -0,0 +1,34 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: timescale-restore-test
  namespace: database
spec:
  schedule: "0 4 * * *" # daily at 04:00
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
            - name: restore-test
              image: bitnami/kubectl:latest
              command:
                - /bin/sh
                - -c
                - |
                  kubectl apply -f /manifests/restore-test.yaml
                  sleep 300
                  kubectl get pods -n database
                  echo "✅ Restore Test executed"
                  kubectl delete cluster timescale-restore-test -n database || true
              volumeMounts:
                - name: manifests
                  mountPath: /manifests
          volumes:
            - name: manifests
              configMap:
                name: restore-test-manifest
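
The job runs kubectl under the namespace's default ServiceAccount, which normally cannot create or delete CNPG Cluster resources, so the apply/delete calls would be denied. A minimal RBAC sketch (all object names are assumptions), to be referenced via serviceAccountName in the pod template:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: restore-test   # assumed name
  namespace: database
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: restore-test
  namespace: database
rules:
  - apiGroups: ["postgresql.cnpg.io"]
    resources: ["clusters"]
    verbs: ["get", "list", "create", "patch", "delete"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: restore-test
  namespace: database
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: restore-test
subjects:
  - kind: ServiceAccount
    name: restore-test
    namespace: database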


@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: s3-creds
  namespace: database
type: Opaque
stringData:
  S3_ACCESS_KEY_ID: admin
  S3_SECRET_ACCESS_KEY: password123