From e29aa22362178cdca064b64d95f29dd13bdc2125 Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Wed, 24 Jan 2018 17:52:41 +0530
Subject: [PATCH] UPSTREAM: 56315: Record volumeID in GlusterFS PV spec

UPSTREAM: 56823: Add volID based delete() and resize() if volID is available in pv spec

UPSTREAM: 57516: Add custom volume name based on SC parameter

UPSTREAM: 58513: Add Namespace to glusterfs custom volume names

UPSTREAM: 58626: Use correct pv annotation to fetch volume ID

Signed-off-by: Humble Chirammal
---
 .../pkg/volume/glusterfs/glusterfs.go | 97 ++++++++++++++-----
 1 file changed, 73 insertions(+), 24 deletions(-)

diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go
index fef3bf6f1172..cfeec376514f 100644
--- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go
+++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go
@@ -36,6 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -84,6 +85,7 @@ const (
 	heketiAnn      = "heketi-dynamic-provisioner"
 	glusterTypeAnn = "gluster.org/type"
 	glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV"
+	heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id"
 )
 
 func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error {
@@ -404,17 +406,18 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio
 }
 
 type provisionerConfig struct {
-	url             string
-	user            string
-	userKey         string
-	secretNamespace string
-	secretName      string
-	secretValue     string
-	clusterID       string
-	gidMin          int
-	gidMax          int
-	volumeType      gapi.VolumeDurabilityInfo
-	volumeOptions   []string
+	url              string
+	user             string
+	userKey          string
+	secretNamespace  string
+	secretName       string
+	secretValue      string
+	clusterID        string
+	gidMin           int
+	gidMax           int
+	volumeType       gapi.VolumeDurabilityInfo
+	volumeOptions    []string
+	volumeNamePrefix string
 }
 
 type glusterfsVolumeProvisioner struct {
@@ -598,8 +601,13 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
 
 func (d *glusterfsVolumeDeleter) Delete() error {
 	glog.V(2).Infof("delete volume: %s ", d.glusterfsMounter.path)
+
 	volumeName := d.glusterfsMounter.path
-	volumeID := dstrings.TrimPrefix(volumeName, volPrefix)
+	volumeID, err := getVolumeID(d.spec, volumeName)
+	if err != nil {
+		return fmt.Errorf("failed to get volumeID, err: %v", err)
+	}
+
 	class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
 	if err != nil {
 		return err
@@ -697,7 +705,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 
 	glog.V(2).Infof("Allocated GID [%d] for PVC %s", gid, p.options.PVC.Name)
 
-	glusterfs, sizeGiB, err := p.CreateVolume(gid)
+	glusterfs, sizeGiB, volID, err := p.CreateVolume(gid)
 	if err != nil {
 		if releaseErr := gidTable.Release(gid); releaseErr != nil {
 			glog.Errorf("error when releasing GID in storageclass: %s", scName)
@@ -723,6 +731,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 		glusterTypeAnn:           "file",
 		"Description":            glusterDescAnn,
 		v1.MountOptionAnnotation: "auto_unmount",
+		heketiVolIDAnn:           volID,
 	}
 
 	pv.Spec.Capacity = v1.ResourceList{
@@ -731,37 +740,44 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	return pv, nil
 }
 
-func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) {
+func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, volID string, err error) {
 	var clusterIDs []string
+	customVolumeName := ""
 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	// Glusterfs creates volumes in units of GiB, but heketi documentation incorrectly reports GBs
 	sz := int(volume.RoundUpToGiB(capacity))
 	glog.V(2).Infof("create volume of size: %d GiB and configuration %+v", sz, p.provisionerConfig)
 	if p.url == "" {
 		glog.Errorf("REST server endpoint is empty")
-		return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
+		return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
 	}
 	cli := gcli.NewClient(p.url, p.user, p.secretValue)
 	if cli == nil {
 		glog.Errorf("failed to create glusterfs rest client")
-		return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
+		return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
 	}
 	if p.provisionerConfig.clusterID != "" {
 		clusterIDs = dstrings.Split(p.clusterID, ",")
 		glog.V(4).Infof("provided clusterIDs: %v", clusterIDs)
 	}
+
+	if p.provisionerConfig.volumeNamePrefix != "" {
+		customVolumeName = fmt.Sprintf("%s_%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Namespace, p.options.PVC.Name, uuid.NewUUID())
+	}
+
 	gid64 := int64(gid)
-	volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
+	volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
 	volume, err := cli.VolumeCreate(volumeReq)
 	if err != nil {
 		glog.Errorf("error creating volume %v ", err)
-		return nil, 0, fmt.Errorf("error creating volume %v", err)
+		return nil, 0, "", fmt.Errorf("error creating volume %v", err)
 	}
 	glog.V(1).Infof("volume with size: %d and name: %s created", volume.Size, volume.Name)
+	volID = volume.Id
 	dynamicHostIps, err := getClusterNodes(cli, volume.Cluster)
 	if err != nil {
 		glog.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
-		return nil, 0, fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
+		return nil, 0, "", fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
 	}
 
 	// The 'endpointname' is created in form of 'glusterfs-dynamic-'.
@@ -777,14 +793,14 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
 		if deleteErr != nil {
 			glog.Errorf("error when deleting the volume :%v , manual deletion required", deleteErr)
 		}
-		return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err)
+		return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v", err)
 	}
 	glog.V(3).Infof("dynamic ep %v and svc : %v ", endpoint, service)
 	return &v1.GlusterfsVolumeSource{
 		EndpointsName: endpoint.Name,
 		Path:          volume.Name,
 		ReadOnly:      false,
-	}, sz, nil
+	}, sz, volID, nil
 }
 
 func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {
@@ -916,6 +932,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 	authEnabled := true
 	parseVolumeType := ""
 	parseVolumeOptions := ""
+	parseVolumeNamePrefix := ""
 
 	for k, v := range params {
 		switch dstrings.ToLower(k) {
@@ -966,7 +983,10 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 			if len(v) != 0 {
 				parseVolumeOptions = v
 			}
-
+		case "volumenameprefix":
+			if len(v) != 0 {
+				parseVolumeNamePrefix = v
+			}
 		default:
 			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName)
 		}
@@ -1046,16 +1066,45 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		cfg.volumeOptions = volOptions
 
 	}
+
+	if len(parseVolumeNamePrefix) != 0 {
+		if dstrings.Contains(parseVolumeNamePrefix, "_") {
+			return nil, fmt.Errorf("Storageclass parameter 'volumenameprefix' should not contain '_' in its value")
+		}
+		cfg.volumeNamePrefix = parseVolumeNamePrefix
+	}
 	return &cfg, nil
 }
 
+// getVolumeID returns volumeID from the PV or volumename.
+func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
+	volumeID := ""
+
+	// Get volID from pvspec if available, else fill it from volumename.
+	if pv != nil {
+		if pv.Annotations[heketiVolIDAnn] != "" {
+			volumeID = pv.Annotations[heketiVolIDAnn]
+		} else {
+			volumeID = dstrings.TrimPrefix(volumeName, volPrefix)
+		}
+	} else {
+		return volumeID, fmt.Errorf("provided PV spec is nil")
+	}
+	if volumeID == "" {
+		return volumeID, fmt.Errorf("volume ID is empty")
+	}
+	return volumeID, nil
+}
+
 func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
 	pvSpec := spec.PersistentVolume.Spec
 	glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path)
 	volumeName := pvSpec.Glusterfs.Path
+	volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)
 
-	// Fetch the volume for expansion.
-	volumeID := dstrings.TrimPrefix(volumeName, volPrefix)
+	if err != nil {
+		return oldSize, fmt.Errorf("failed to get volumeID, err: %v", err)
+	}
 
 	//Get details of SC.
 	class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
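
For reference, below is a minimal standalone Go sketch (not taken from the patch itself) of the volume-ID lookup order these changes introduce: the heketi volume ID recorded in the new "gluster.kubernetes.io/heketi-volume-id" PV annotation is preferred, and trimming the volume-name prefix is the fallback, which is how Delete() and ExpandVolumeDevice() now resolve the ID via getVolumeID(). The sketch uses a plain map instead of the PersistentVolume API type, assumes "vol_" as the plugin's volPrefix value, and uses illustrative names and IDs only.

package main

import (
	"fmt"
	"strings"
)

// Same annotation key the patch adds as heketiVolIDAnn.
const heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id"

// resolveVolumeID mirrors the lookup order of the patch's getVolumeID helper:
// prefer the annotation, otherwise derive the ID from the volume name.
func resolveVolumeID(annotations map[string]string, volumeName string) (string, error) {
	if id := annotations[heketiVolIDAnn]; id != "" {
		return id, nil
	}
	// Assumed fallback: heketi-provisioned volumes are named "vol_<id>".
	if id := strings.TrimPrefix(volumeName, "vol_"); id != "" {
		return id, nil
	}
	return "", fmt.Errorf("volume ID is empty")
}

func main() {
	// PV provisioned after this patch: the ID comes from the annotation,
	// even when a custom volumenameprefix was used for the volume name.
	id, _ := resolveVolumeID(map[string]string{heketiVolIDAnn: "0414fa5e"}, "myprefix_ns_pvc_x")
	fmt.Println(id)

	// Older PV without the annotation: fall back to the volume name.
	id, _ = resolveVolumeID(map[string]string{}, "vol_0414fa5e")
	fmt.Println(id)
}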