Чтобы уменьшить размер тома сервиса Метрики без потери данных, выполните следующие шаги:
# Look up the PersistentVolume name backing the "metrics" PVC;
# it identifies the Longhorn volume used in the steps below.
sudo k0s kubectl get pvc metrics -n kuma -o json | jq '.spec.volumeName'
# Stop the Metrics service so the volume can be released.
sudo k0s kubectl scale deployment/metrics -n kuma --replicas=0
# List pods to find the metrics pod name (substitute it for <UID> below).
sudo k0s kubectl get pods -n kuma
# Force-delete the metrics pod if it has not terminated on its own.
sudo k0s kubectl delete pod metrics-<UID> -n kuma --force
# Check the Longhorn volume state before proceeding.
# NOTE(review): wait for the volume to report "detached" — confirm the
# exact value against your Longhorn version.
sudo k0s kubectl get volume <имя_тома> -n longhorn-system -o json | jq '.status.state'
# Run a temporary helper pod (via a Job) that mounts the "metrics" PVC at
# /metrics and sleeps for 10 hours (36000 s), so the volume contents can be
# inspected and copied out while the Metrics service is stopped.
# ttlSecondsAfterFinished: 300 removes the finished Job after 5 minutes;
# backoffLimit: 0 prevents retries on failure.
sudo k0s kubectl apply -f - <<EOF
---
apiVersion: batch/v1
kind: Job
metadata:
  name: tmp-metrics-job
  namespace: kuma
spec:
  ttlSecondsAfterFinished: 300
  backoffLimit: 0
  parallelism: 1
  completions: 1
  template:
    spec:
      containers:
        - name: tmp-container
          image: docker.io/library/busybox:1.35.0
          imagePullPolicy: IfNotPresent
          command: ["/bin/sh"]
          args:
            - -c
            - >-
              sleep 36000
          volumeMounts:
            - mountPath: /metrics
              name: metrics
      restartPolicy: Never
      volumes:
        - name: metrics
          persistentVolumeClaim:
            claimName: metrics
EOF
# Find the temporary pod and the node it was scheduled on.
sudo k0s kubectl get pods -n kuma -o wide | grep tmp-metrics
# On that node: verify the Longhorn volume is attached as a block device.
sudo lsblk | grep "<имя_тома>"
# Inside the temporary pod, inspect the mounted volume.
# Example output of the mounted /metrics directory:
ls -al
total 48
drwxrwsr-x 9 root 65532 4096 Aug 26 10:36 .
drwxr-x--- 3 root root 4096 Aug 26 11:08 ..
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 certificates
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 dashboards
drwxrwsr-x 8 root 65532 4096 Aug 26 10:34 data
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 log
drwxrws--- 2 root 65532 16384 Aug 26 10:34 lost+found
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 rules
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 tmp
# NOTE(review): copy the volume data to a safe location BEFORE this point —
# the PVC (and its Longhorn volume) is deleted below.
# Remove the temporary Job.
sudo k0s kubectl delete job tmp-metrics-job -n kuma
# List pods to find the Job's pod name (substitute it for <UID> below).
sudo k0s kubectl get pods -n kuma
# Force-delete the Job's pod if it is still present.
sudo k0s kubectl delete pod tmp-metrics-job-<UID> -n kuma --force
# Wait until the Longhorn volume is released again before deleting the PVC.
sudo k0s kubectl get volume <имя_тома> -n longhorn-system -o json | jq '.status.state'
# Delete the PVC; the backing Longhorn volume should be removed with it.
sudo k0s kubectl delete pvc metrics -n kuma
# Confirm the old Longhorn volume no longer exists.
sudo k0s kubectl get volume <имя_тома> -n longhorn-system
На всех рабочих узлах при этом в директории /opt/longhorn/replicas должны пропасть директории, имя которых начинается с имени удалённого тома. Если этого не произошло, то их нужно удалить вручную. Важно не удалить по ошибке реплики, относящиеся к другим томам.
# Recreate the "metrics" PVC with the new, smaller size.
# NOTE(review): set "storage:" to the desired target size (30Gi here is an
# example value).
sudo k0s kubectl apply -f - <<EOF
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: metrics
  namespace: kuma
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: kuma-network-storage
  resources:
    requests:
      storage: 30Gi
EOF
# Look up the name of the newly provisioned PersistentVolume.
sudo k0s kubectl get pvc metrics -n kuma -o json | jq '.spec.volumeName'
# Run the temporary helper pod again, this time mounting the NEW "metrics"
# PVC, so the previously saved data can be copied back onto the resized
# volume. The pod sleeps for 10 hours (36000 s); ttlSecondsAfterFinished: 300
# removes the finished Job after 5 minutes; backoffLimit: 0 prevents retries.
sudo k0s kubectl apply -f - <<EOF
---
apiVersion: batch/v1
kind: Job
metadata:
  name: tmp-metrics-job
  namespace: kuma
spec:
  ttlSecondsAfterFinished: 300
  backoffLimit: 0
  parallelism: 1
  completions: 1
  template:
    spec:
      containers:
        - name: tmp-container
          image: docker.io/library/busybox:1.35.0
          imagePullPolicy: IfNotPresent
          command: ["/bin/sh"]
          args:
            - -c
            - >-
              sleep 36000
          volumeMounts:
            - mountPath: /metrics
              name: metrics
      restartPolicy: Never
      volumes:
        - name: metrics
          persistentVolumeClaim:
            claimName: metrics
EOF
# Find the temporary pod and the node it was scheduled on.
sudo k0s kubectl get pods -n kuma -o wide | grep tmp-metrics
# On that node: verify the new Longhorn volume is attached as a block device.
sudo lsblk | grep "<имя_тома>"
# Inside the temporary pod, verify the restored contents of the volume.
# Example output after the data has been copied back:
ls -al
total 48
drwxrwsr-x 9 root 65532 4096 Aug 26 10:36 .
drwxr-x--- 3 root root 4096 Aug 26 11:08 ..
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 certificates
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 dashboards
drwxrwsr-x 8 root 65532 4096 Aug 26 10:34 data
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 log
drwxrws--- 2 root 65532 16384 Aug 26 10:34 lost+found
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 rules
drwx--S--- 2 65532 65532 4096 Aug 26 10:36 tmp
# Remove the temporary Job now that the data is in place.
sudo k0s kubectl delete job tmp-metrics-job -n kuma
# List pods to find the Job's pod name (substitute it for <UID> below).
sudo k0s kubectl get pods -n kuma
# Force-delete the Job's pod if it is still present.
sudo k0s kubectl delete pod tmp-metrics-job-<UID> -n kuma --force
# Wait until the Longhorn volume is released before starting the service.
sudo k0s kubectl get volume <имя_тома> -n longhorn-system -o json | jq '.status.state'
# Start the Metrics service back up.
sudo k0s kubectl scale deployment/metrics -n kuma --replicas=1
Уменьшение размера тома сервиса Метрики без потери данных выполнено.
В начало