Weitere Implementierung: CNPG-Backups (täglich/stündlich), Backup-Monitoring und Restore-Test
This commit is contained in:
11
k8s/base/database/backup-daily.yaml
Normal file
11
k8s/base/database/backup-daily.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
---
# Daily base backup of the TimescaleDB cluster via CloudNativePG.
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: timescale-backup-daily
  namespace: database
spec:
  # CNPG ScheduledBackup uses a SIX-field cron expression (seconds first);
  # the original 5-field "0 2 * * *" is rejected by the CNPG cron parser.
  schedule: "0 0 2 * * *"  # daily at 02:00
  backupOwnerReference: self
  cluster:
    name: timescale-cluster
  method: barmanObjectStore
|
||||
12
k8s/base/database/backup-hourly.yaml
Normal file
12
k8s/base/database/backup-hourly.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
---
# Hourly base backup of the TimescaleDB cluster via CloudNativePG.
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: timescale-backup-hourly
  namespace: database
spec:
  # CNPG ScheduledBackup uses a SIX-field cron expression (seconds first);
  # the original 5-field "0 * * * *" is rejected by the CNPG cron parser.
  schedule: "0 0 * * * *"  # every hour, on the hour
  backupOwnerReference: self
  cluster:
    name: timescale-cluster
  method: barmanObjectStore
|
||||
|
||||
17
k8s/base/database/backup-monitor.yaml
Normal file
17
k8s/base/database/backup-monitor.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
# Alert when no successful CNPG backup exists within the last 24 hours.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: cnpg-backup-alert
  namespace: database
spec:
  groups:
    - name: cnpg.rules
      rules:
        - alert: CNPGBackupMissing
          # CloudNativePG exports the last successful backup time as
          # cnpg_collector_last_available_backup_timestamp (unix seconds).
          # The original expr referenced cnpg_last_backup_time_seconds,
          # which CNPG does not export, so the alert could never fire.
          # NOTE(review): consider a companion absent()-based rule so the
          # alert also fires when the metric is missing entirely.
          expr: time() - cnpg_collector_last_available_backup_timestamp > 86400
          for: 10m
          labels:
            severity: critical
          annotations:
            summary: "CNPG Backup missing"
            description: "No backup in last 24h"
|
||||
@@ -4,10 +4,51 @@ metadata:
|
||||
name: timescale-cluster
|
||||
spec:
|
||||
instances: 3
|
||||
|
||||
imageName: timescale/timescaledb:2.15.3-pg15
|
||||
|
||||
# ✅ Initiales Setup
|
||||
bootstrap:
|
||||
initdb:
|
||||
database: app
|
||||
owner: app
|
||||
|
||||
# ✅ Storage
|
||||
storage:
|
||||
size: 50Gi
|
||||
|
||||
# ✅ Ressourcen (wichtig!)
|
||||
resources:
|
||||
requests:
|
||||
memory: "2Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "2"
|
||||
|
||||
# ✅ High Availability
|
||||
affinity:
|
||||
enablePodAntiAffinity: true
|
||||
topologyKey: kubernetes.io/hostname
|
||||
|
||||
# ✅ Monitoring
|
||||
monitoring:
|
||||
enablePodMonitor: true
|
||||
|
||||
# ✅ PostgreSQL / Timescale Tuning
|
||||
postgresql:
|
||||
parameters:
|
||||
max_connections: "100" # mehr Verbindungen 200
|
||||
shared_buffers: "256MB" # Cache im RAM 1GB
|
||||
effective_cache_size: "3GB" # Optimizer hint
|
||||
work_mem: "16MB" # pro Query
|
||||
maintenance_work_mem: "256MB" # für VACUUM/REINDEX
|
||||
wal_buffers: "16MB"
|
||||
checkpoint_completion_target: "0.9"
|
||||
random_page_cost: "1.1" # SSD optimiert
|
||||
effective_io_concurrency: "200"
|
||||
backup:
|
||||
retentionPolicy: "7d" # Backup älter als 7 Tage -> automatisch gelöscht
|
||||
barmanObjectStore:
|
||||
destinationPath: "s3://backups/"
|
||||
endpointURL: "http://minio.minio-system.svc.cluster.local:9000"
|
||||
@@ -18,3 +59,5 @@ spec:
|
||||
secretAccessKey:
|
||||
name: s3-creds
|
||||
key: SECRET_ACCESS_KEY
|
||||
wal:
|
||||
compression: gzip
|
||||
@@ -1,2 +1,9 @@
|
||||
resources:
  - cluster.yaml
  - s3-secret.yaml
  - backup-hourly.yaml
  - backup-daily.yaml
  # backup-monitor.yaml was added in the same change set but was never
  # listed here, so the PrometheusRule would not be deployed.
  - backup-monitor.yaml
  - restore-cronjob.yaml
  - restore-configmap.yaml
|
||||
|
||||
|
||||
|
||||
33
k8s/base/database/restore-configmap.yaml
Normal file
33
k8s/base/database/restore-configmap.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
---
# Manifest consumed by the restore-test CronJob: bootstraps a throwaway
# single-instance cluster from the latest barman backup to verify that
# the backups are actually restorable.
apiVersion: v1
kind: ConfigMap
metadata:
  name: restore-test-manifest
  namespace: database
data:
  restore-test.yaml: |
    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: timescale-restore-test
    spec:
      instances: 1
      imageName: timescale/timescaledb:2.15.3-pg15
      bootstrap:
        recovery:
          source: timescale-cluster
          recoveryTarget:
            targetImmediate: true
      storage:
        size: 10Gi
      externalClusters:
        - name: timescale-cluster
          barmanObjectStore:
            destinationPath: "s3://backups/"
            endpointURL: "http://minio.minio-system.svc.cluster.local:9000"
            s3Credentials:
              accessKeyId:
                name: s3-creds
                key: S3_ACCESS_KEY_ID
              secretAccessKey:
                name: s3-creds
                key: S3_SECRET_ACCESS_KEY
|
||||
34
k8s/base/database/restore-cronjob.yaml
Normal file
34
k8s/base/database/restore-cronjob.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
---
# Nightly restore verification: applies a throwaway recovery cluster,
# waits for it to come up, then tears it down again.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: timescale-restore-test
  namespace: database
spec:
  schedule: "0 4 * * *"  # daily at 04:00 (Kubernetes CronJob: standard 5-field cron)
  jobTemplate:
    spec:
      template:
        spec:
          # NOTE(review): no serviceAccountName is set, so this Pod runs under
          # the namespace default ServiceAccount, which normally has no RBAC
          # permission to apply/delete CNPG Cluster resources — confirm a
          # suitably bound ServiceAccount exists and reference it here.
          restartPolicy: OnFailure
          containers:
            - name: restore-test
              image: bitnami/kubectl:latest
              command:
                - /bin/sh
                - -c
                - |
                  kubectl apply -f /manifests/restore-test.yaml
                  sleep 300
                  kubectl get pods -n database
                  echo "✅ Restore Test executed"
                  # Fully-qualified resource name: a bare "cluster" is
                  # ambiguous when several CRDs share the short name.
                  kubectl delete clusters.postgresql.cnpg.io timescale-restore-test -n database || true
              volumeMounts:
                - name: manifests
                  mountPath: /manifests
          volumes:
            - name: manifests
              configMap:
                name: restore-test-manifest
|
||||
9
k8s/base/database/s3-secret.yaml
Normal file
9
k8s/base/database/s3-secret.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
# Object-store credentials consumed by the CNPG backup/restore manifests.
apiVersion: v1
kind: Secret
metadata:
  name: s3-creds
  namespace: database
type: Opaque
# NOTE(review): plaintext credentials committed to version control — replace
# with a SealedSecret/ExternalSecret or inject from a secret store.
stringData:
  S3_ACCESS_KEY_ID: admin
  S3_SECRET_ACCESS_KEY: password123
|
||||
Reference in New Issue
Block a user