From 157e9f196e00ba7806b3211b33849328ae978d42 Mon Sep 17 00:00:00 2001 From: yaqi Date: Fri, 5 Jul 2019 11:10:33 +0800 Subject: [PATCH] auto remove the pv with hardware problems --- pkg/deleter/deleter.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/pkg/deleter/deleter.go b/pkg/deleter/deleter.go index 2ecdb61b1..58c4be79a 100644 --- a/pkg/deleter/deleter.go +++ b/pkg/deleter/deleter.go @@ -69,9 +69,30 @@ func NewDeleter(config *common.RuntimeConfig, cleanupTracker *CleanupStatusTrack // delete them func (d *Deleter) DeletePVs() { for _, pv := range d.Cache.ListPVs() { - if pv.Status.Phase != v1.VolumeReleased { + + var shouldTryToDelete bool + switch pv.Status.Phase { + // PV which is in Released state, provisioner will clear its data + case v1.VolumeReleased: + shouldTryToDelete = true + // When the pv has a hardware problem, + // 1. If the pv is in Bound state, some pod is using it; after migrating + // the affected pod to another node, we can delete the pvc, so the pv will be + // in Released state, similar to above. + // 2. If the pv is in Available state, after recycling the hardware, + // the provisioner automatically removes the pv both in the cluster and + // in its cache. + case v1.VolumeAvailable: + _, err := os.Stat(pv.Spec.Local.Path) + if os.IsNotExist(err) { + shouldTryToDelete = true + } + } + + if !shouldTryToDelete { continue } + name := pv.Name switch pv.Spec.PersistentVolumeReclaimPolicy { case v1.PersistentVolumeReclaimRetain: