diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index b07bdf0df7ba..e2890dedc3ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -2487,9 +2487,8 @@ func (c *Cloud) ResizeDisk( descErr := fmt.Errorf("AWS.ResizeDisk Error describing volume %s with %v", diskName, err) return oldSize, descErr } - requestBytes := newSize.Value() // AWS resizes in chunks of GiB (not GB) - requestGiB := volumeutil.RoundUpSize(requestBytes, 1024*1024*1024) + requestGiB := volumeutil.RoundUpToGiB(newSize) newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB)) // If disk already if of greater or equal size than requested we return diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go index 7b7bb2dded38..3687b1e7f281 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go @@ -746,13 +746,12 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, return oldSize, err } - requestBytes := newSize.Value() - // GCE resizes in chunks of GBs (not GiB) - requestGB := volumeutil.RoundUpSize(requestBytes, 1000*1000*1000) - newSizeQuant := resource.MustParse(fmt.Sprintf("%dG", requestGB)) + // GCE resizes in chunks of GiBs + requestGIB := volumeutil.RoundUpToGiB(newSize) + newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGIB)) // If disk is already of size equal or greater than requested size, we simply return - if disk.SizeGb >= requestGB { + if disk.SizeGb >= requestGIB { return newSizeQuant, nil } @@ -761,7 +760,7 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, switch zoneInfo := disk.ZoneInfo.(type) { case singleZone: mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone) - err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone) + err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGIB, zoneInfo.zone) if err != nil { return oldSize, mc.Observe(err) @@ -774,7 +773,7 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, } mc = newDiskMetricContextRegional("resize", disk.Region) - err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB) + err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGIB) if err != nil { return oldSize, mc.Observe(err) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go index 40855e24a99c..ada583d30e5a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go @@ -104,6 +104,7 @@ func TestReadConfig(t *testing.T) { auth-url = http://auth.url user-id = user tenant-name = demo + region = RegionOne [LoadBalancer] create-monitor = yes monitor-delay = 1m @@ -136,6 +137,10 @@ func TestReadConfig(t *testing.T) { t.Errorf("incorrect tenant name: %s", cfg.Global.TenantName) } + if cfg.Global.Region != "RegionOne" { + t.Errorf("incorrect region: %s", cfg.Global.Region) + } + if !cfg.LoadBalancer.CreateMonitor { t.Errorf("incorrect lb.createmonitor: %t", cfg.LoadBalancer.CreateMonitor) } @@ -554,7 
+559,7 @@ func TestVolumes(t *testing.T) { tags := map[string]string{ "test": "value", } - vol, _, _, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags) + vol, _, _, _, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags) if err != nil { t.Fatalf("Cannot create a new Cinder volume: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go index f50a23119743..05b4edc137e6 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -411,13 +411,15 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne return oldSize, fmt.Errorf("volume status is not available") } - volSizeBytes := newSize.Value() // Cinder works with gigabytes, convert to GiB with rounding up - volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024)) - newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGB)) + volSizeGiB, err := volumeutil.RoundUpToGiBInt(newSize) + if err != nil { + return oldSize, err + } + newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGiB)) // if volume size equals to or greater than the newSize, return nil - if volume.Size >= volSizeGB { + if volume.Size >= volSizeGiB { return newSizeQuant, nil } @@ -426,7 +428,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne return oldSize, err } - err = volumes.expandVolume(volumeID, volSizeGB) + err = volumes.expandVolume(volumeID, volSizeGiB) if err != nil { return oldSize, err } @@ -443,10 +445,10 @@ func (os *OpenStack) getVolume(volumeID string) (Volume, error) { } // CreateVolume creates a volume of given size (in GiB) -func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) { +func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) { volumes, err := os.volumeService("") if err != nil { - return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err) + return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err) } opts := volumeCreateOpts{ @@ -462,11 +464,11 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str volumeID, volumeAZ, err := volumes.createVolume(opts) if err != nil { - return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err) + return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err) } - glog.Infof("Created volume %v in Availability Zone: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ) - return volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ, nil + glog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ) + return volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ, nil } // GetDevicePathBySerialID returns the path of an attached block storage volume, specified by its id. 
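The cloud-provider hunks above all converge on the same conversion: take the requested resource.Quantity in bytes and round it up to whole allocation units, now consistently in GiB. The standalone sketch below illustrates that ceiling division and why the unit matters; the helper mirrors RoundUpSize from pkg/volume/util, but the constants and the sample request value are illustrative assumptions, not part of the patch.

package main

import "fmt"

const (
	GB  int64 = 1000 * 1000 * 1000 // decimal gigabyte
	GiB int64 = 1024 * 1024 * 1024 // binary gibibyte
)

// roundUpSize is plain ceiling division: how many whole allocation units
// are needed to hold a volume of volumeSizeBytes.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	roundedUp := volumeSizeBytes / allocationUnitBytes
	if volumeSizeBytes%allocationUnitBytes > 0 {
		roundedUp++
	}
	return roundedUp
}

func main() {
	// A hypothetical 1Gi resize request, expressed in bytes.
	request := 1 * GiB

	// Rounded to decimal GB the request inflates to 2 units, because
	// 1 GiB is slightly larger than 1 GB.
	fmt.Println(roundUpSize(request, GB)) // 2

	// Rounded to GiB it stays at 1, matching the GiB-based allocation
	// these hunks now assume for AWS, GCE and Cinder.
	fmt.Println(roundUpSize(request, GiB)) // 1
}

That difference is why the GCE resize path above stops formatting its result with "%dG" in favor of "%dGi", and why the e2e expectations later in this patch move from "G" to "Gi" quantities.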
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go index 94f9dd30ec25..b3ba5e624a76 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go @@ -83,11 +83,13 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - requestBytes := capacity.Value() // AWS works with gigabytes, convert to GiB with rounding up - requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024)) + requestGiB, err := volumeutil.RoundUpToGiBInt(capacity) + if err != nil { + return "", 0, nil, "", err + } volumeOptions := &aws.VolumeOptions{ - CapacityGB: requestGB, + CapacityGB: requestGiB, Tags: tags, PVCName: c.options.PVC.Name, } @@ -147,7 +149,7 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K glog.Errorf("error building labels for new EBS volume %q: %v", name, err) } - return name, int(requestGB), labels, fstype, nil + return name, requestGiB, labels, fstype, nil } // Returns the first path that exists, or empty string if none exist. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go index 2c4c9b8aafd4..0b2363971ee7 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go @@ -100,8 +100,10 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie // maxLength = 79 - (4 for ".vhd") = 75 name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - requestBytes := capacity.Value() - requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024)) + requestGiB, err := util.RoundUpToGiBInt(capacity) + if err != nil { + return nil, err + } for k, v := range p.options.Parameters { switch strings.ToLower(k) { @@ -157,18 +159,18 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie if p.options.CloudTags != nil { tags = *(p.options.CloudTags) } - diskURI, err = diskController.CreateManagedDisk(name, skuName, resourceGroup, requestGB, tags) + diskURI, err = diskController.CreateManagedDisk(name, skuName, resourceGroup, requestGiB, tags) if err != nil { return nil, err } } else { if kind == v1.AzureDedicatedBlobDisk { - _, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB) + _, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGiB) if err != nil { return nil, err } } else { - diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB) + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGiB) if err != nil { return nil, err } @@ -187,7 +189,7 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy, AccessModes: supportedModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGiB)), }, 
PersistentVolumeSource: v1.PersistentVolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go index 6acb573a51bb..eaec0a1dccdb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go @@ -615,8 +615,8 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID return expected.isAttached, expected.instanceID, expected.ret } -func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) { - return "", "", false, errors.New("Not implemented") +func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) { + return "", "", "", false, errors.New("Not implemented") } func (testcase *testcase) GetDevicePath(volumeID string) string { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go index d4697b6179f8..55270b37a132 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go @@ -53,7 +53,7 @@ type BlockStorageProvider interface { AttachDisk(instanceID, volumeID string) (string, error) DetachDisk(instanceID, volumeID string) error DeleteVolume(volumeID string) error - CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) + CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) GetDevicePath(volumeID string) string InstanceID() (string, error) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go index 8a5e25d8daf8..ab115f83add0 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go @@ -169,9 +169,12 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, } capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - volSizeBytes := capacity.Value() // Cinder works with gigabytes, convert to GiB with rounding up - volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024)) + volSizeGiB, err := volutil.RoundUpToGiBInt(capacity) + if err != nil { + return "", 0, nil, "", err + } + name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters vtype := "" availability := "" @@ -208,10 +211,10 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, } } - volumeID, volumeAZ, IgnoreVolumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags) - if errr != nil { - glog.V(2).Infof("Error creating cinder volume: %v", errr) - return "", 0, nil, "", errr + volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGiB, vtype, availability, c.options.CloudTags) + if err != nil { + glog.V(2).Infof("Error creating cinder volume: %v", err) + return "", 0, nil, "", err } glog.V(2).Infof("Successfully created cinder volume %s", volumeID) @@ -219,8 +222,9 @@ func (util *DiskUtil) CreateVolume(c 
*cinderVolumeProvisioner) (volumeID string, volumeLabels = make(map[string]string) if IgnoreVolumeAZ == false { volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ + volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion } - return volumeID, volSizeGB, volumeLabels, fstype, nil + return volumeID, volSizeGiB, volumeLabels, fstype, nil } func probeAttachedVolume() error { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go b/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go index 3d9149cd34eb..4798ed692ef0 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go @@ -49,7 +49,7 @@ func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error { return d.flockerClient.DeleteDataset(datasetUUID) } -func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error) { +func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) { if c.flockerClient == nil { c.flockerClient, err = c.plugin.newFlockerClient("") @@ -74,7 +74,10 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() - volumeSizeGB = int(volutil.RoundUpSize(requestBytes, 1024*1024*1024)) + volumeSizeGiB, err = volutil.RoundUpToGiBInt(capacity) + if err != nil { + return + } createOptions := &flockerapi.CreateDatasetOptions{ MaximumSize: requestBytes, diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go index 18e040433a62..bcb20328a166 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go @@ -492,7 +492,7 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, AccessModes: c.options.PVC.Spec.AccessModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), }, PersistentVolumeSource: v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go index 54a1de07f114..c3d80c68d124 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go @@ -177,7 +177,7 @@ func TestPlugin(t *testing.T) { } cap := persistentSpec.Spec.Capacity[v1.ResourceStorage] size := cap.Value() - if size != 100*util.GB { + if size != 100*util.GIB { t.Errorf("Provision() returned unexpected volume size: %v", size) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go index 8774d77c8fcc..92b7f8eb2cab 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go @@ -86,8 +86,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin name := volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters capacity := 
c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - // GCE PDs are allocated in chunks of GBs (not GiBs) - requestGB := volumeutil.RoundUpToGB(capacity) + // GCE PDs are allocated in chunks of GiBs + requestGB := volumeutil.RoundUpToGiB(capacity) // Apply Parameters. // Values for parameter "replication-type" are canonicalized to lower case. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go index a2d200d216aa..59fb71c1a0e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go @@ -744,7 +744,10 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // GlusterFS/heketi creates volumes in units of GiB. - sz := int(volutil.RoundUpToGiB(capacity)) + sz, err := volutil.RoundUpToGiBInt(capacity) + if err != nil { + return nil, 0, "", err + } glog.V(2).Infof("create volume of size %dGiB", sz) if p.url == "" { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go index d4eb69a2fbd5..a7d3a0fe9f33 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go @@ -88,9 +88,11 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd } capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - volSizeBytes := capacity.Value() - // PhotonController works with GB, convert to GB with rounding up - volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024)) + // PhotonController works with GiB, convert to GiB with rounding up + volSizeGB, err := volumeutil.RoundUpToGiBInt(capacity) + if err != nil { + return "", 0, "", err + } name := volumeutil.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255) volumeOptions := &photon.VolumeOptions{ CapacityGB: volSizeGB, diff --git a/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go b/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go index fa9bd54a8064..d61879b7bb6c 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go @@ -35,7 +35,10 @@ type quobyteVolumeManager struct { func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner, createQuota bool) (quobyte *v1.QuobyteVolumeSource, size int, err error) { capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - volumeSize := int(util.RoundUpSize(capacity.Value(), 1024*1024*1024)) + volumeSize, err := util.RoundUpToGiBInt(capacity) + if err != nil { + return nil, 0, err + } // Quobyte has the concept of Volumes which doen't have a specific size (they can grow unlimited) // to simulate a size constraint we set here a Quota for logical space volumeRequest := &quobyteapi.CreateVolumeRequest{ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go index 1860886777ce..72990dfc1e75 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go @@ -575,7 +575,10 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo capacity := 
p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() // Convert to MB that rbd defaults on. - sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024)) + sz, err := volutil.RoundUpSizeInt(volSizeBytes, 1024*1024) + if err != nil { + return nil, 0, err + } volSz := fmt.Sprintf("%d", sz) mon := util.kernelRBDMonitorsOpt(p.Mon) if p.rbdMounter.imageFormat == rbdImageFormat2 { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go b/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go index 8b5c2311c9fc..1130ae5b5edb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go @@ -602,7 +602,11 @@ func (c *storageosProvisioner) Provision(selectedNode *v1.Node, allowedTopologie c.labels[k] = v } capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - c.sizeGB = int(util.RoundUpSize(capacity.Value(), 1024*1024*1024)) + var err error + c.sizeGB, err = util.RoundUpToGiBInt(capacity) + if err != nil { + return nil, err + } apiCfg, err := parsePVSecret(adminSecretNamespace, adminSecretName, c.plugin.host.GetKubeClient()) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 1e7b70934069..a8efff57830b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -361,7 +361,11 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers // RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2' // (2 GiB is the smallest allocatable volume that can hold 1500MiB) func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { - return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes + roundedUp := volumeSizeBytes / allocationUnitBytes + if volumeSizeBytes%allocationUnitBytes > 0 { + roundedUp += 1 + } + return roundedUp } // RoundUpToGB rounds up given quantity to chunks of GB @@ -376,6 +380,32 @@ func RoundUpToGiB(size resource.Quantity) int64 { return RoundUpSize(requestBytes, GIB) } +// RoundUpSizeInt calculates how many allocation units are needed to accommodate +// a volume of given size. It returns an int instead of an int64 and an error if +// there's overflow +func RoundUpSizeInt(volumeSizeBytes int64, allocationUnitBytes int64) (int, error) { + roundedUp := RoundUpSize(volumeSizeBytes, allocationUnitBytes) + roundedUpInt := int(roundedUp) + if int64(roundedUpInt) != roundedUp { + return 0, fmt.Errorf("capacity %v is too great, casting results in integer overflow", roundedUp) + } + return roundedUpInt, nil +} + +// RoundUpToGBInt rounds up given quantity to chunks of GB. It returns an +// int instead of an int64 and an error if there's overflow +func RoundUpToGBInt(size resource.Quantity) (int, error) { + requestBytes := size.Value() + return RoundUpSizeInt(requestBytes, GB) +} + +// RoundUpToGiBInt rounds up given quantity upto chunks of GiB. It returns an +// int instead of an int64 and an error if there's overflow +func RoundUpToGiBInt(size resource.Quantity) (int, error) { + requestBytes := size.Value() + return RoundUpSizeInt(requestBytes, GIB) +} + // GenerateVolumeName returns a PV name with clusterName prefix. The function // should be used to generate a name of GCE PD or Cinder volume. 
It basically // adds "-dynamic-" before the PV name, making sure the resulting diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go index 14e235d5db9f..42053ad45f9e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -93,10 +93,13 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() // vSphere works with kilobytes, convert to KiB with rounding up - volSizeKB := int(volumeutil.RoundUpSize(volSizeBytes, 1024)) + volSizeKiB, err := volumeutil.RoundUpSizeInt(volSizeBytes, 1024) + if err != nil { + return nil, err + } name := volumeutil.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255) volumeOptions := &vclib.VolumeOptions{ - CapacityKB: volSizeKB, + CapacityKB: volSizeKiB, Tags: *v.options.CloudTags, Name: name, } @@ -146,7 +149,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec } volSpec = &VolumeSpec{ Path: vmDiskPath, - Size: volSizeKB, + Size: volSizeKiB, Fstype: fstype, StoragePolicyName: volumeOptions.StoragePolicyName, StoragePolicyID: volumeOptions.StoragePolicyID, diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go index 4d040a8bfeca..26bb95bebbea 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go @@ -265,8 +265,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { "type": "pd-ssd", "zone": cloudZone, }, - claimSize: "1.5G", - expectedSize: "2G", + claimSize: "1.5Gi", + expectedSize: "2Gi", pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-ssd") }, @@ -278,8 +278,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { parameters: map[string]string{ "type": "pd-standard", }, - claimSize: "1.5G", - expectedSize: "2G", + claimSize: "1.5Gi", + expectedSize: "2Gi", pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, @@ -443,8 +443,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { parameters: map[string]string{ "type": "pd-standard", }, - claimSize: "1G", - expectedSize: "1G", + claimSize: "1Gi", + expectedSize: "1Gi", pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, @@ -477,8 +477,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { parameters: map[string]string{ "type": "pd-standard", }, - claimSize: "1G", - expectedSize: "1G", + claimSize: "1Gi", + expectedSize: "1Gi", pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, @@ -526,7 +526,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { name: "unmanaged_zone", provisioner: "kubernetes.io/gce-pd", parameters: map[string]string{"zone": unmanagedZone}, - claimSize: "1G", + claimSize: "1Gi", } sc := newStorageClass(test, ns, suffix) sc, err = c.StorageV1().StorageClasses().Create(sc) @@ -714,13 +714,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { claimSize: "2Gi", expectedSize: "2Gi", } - // gce or gke - if getDefaultPluginName() == "kubernetes.io/gce-pd" { - // using GB not GiB as e2e test unit since 
gce-pd returns GB, - // or expectedSize may be greater than claimSize. - test.claimSize = "2G" - test.expectedSize = "2G" - } claim := newClaim(test, ns, "default") testDynamicProvisioning(test, c, claim, nil) @@ -803,17 +796,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { parameters: map[string]string{"resturl": serverUrl}, attach: false, } - - // GCE/GKE - if getDefaultPluginName() == "kubernetes.io/gce-pd" { - // Keeping an extra condition here based on below facts: - //*) gce-pd rounds up to the next gb. - //*) GlusterFS provisioner rounduptoGiB() and send it to backend, - // which does 'size/number' from provisioner*1024*1024*1024 - test.claimSize = "2Gi" - test.expectedSize = "3G" - } - suffix := fmt.Sprintf("glusterdptest") class := newStorageClass(test, ns, suffix) @@ -838,14 +820,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { expectedSize: "2Gi", volumeMode: &block, } - // gce or gke - if getDefaultPluginName() == "kubernetes.io/gce-pd" { - // using GB not GiB as e2e test unit since gce-pd returns GB, - // or expectedSize may be greater than claimSize. - test.claimSize = "2G" - test.expectedSize = "2G" - } - claim := newClaim(test, ns, "default") claim.Spec.VolumeMode = &block testDynamicProvisioning(test, c, claim, nil)
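Several provisioners above replace bare int(RoundUpSize(...)) casts with the new RoundUpToGiBInt helper, and RoundUpSize itself is rewritten in pkg/volume/util/util.go. The sketch below reproduces that logic to show the two failure modes being closed: the old add-then-divide formula can wrap around int64 for extreme request sizes, and an unchecked int(...) cast can silently truncate where int is 32 bits. The names mirror the patched helpers, but this is an illustrative reimplementation rather than the vendored code.

package main

import (
	"fmt"
	"math"
)

const GiB int64 = 1024 * 1024 * 1024

// oldRoundUp is the previous formula, (bytes + unit - 1) / unit; the
// addition can wrap around int64 for pathologically large requests.
func oldRoundUp(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

// newRoundUp divides first and only bumps the result when a remainder is
// left over, so the intermediate value cannot overflow.
func newRoundUp(volumeSizeBytes, allocationUnitBytes int64) int64 {
	roundedUp := volumeSizeBytes / allocationUnitBytes
	if volumeSizeBytes%allocationUnitBytes > 0 {
		roundedUp++
	}
	return roundedUp
}

// roundUpToGiBInt narrows the rounded value to int and reports overflow
// instead of silently truncating, mirroring the helper the provisioners
// now call.
func roundUpToGiBInt(volumeSizeBytes int64) (int, error) {
	roundedUp := newRoundUp(volumeSizeBytes, GiB)
	roundedUpInt := int(roundedUp)
	if int64(roundedUpInt) != roundedUp {
		return 0, fmt.Errorf("capacity %v is too great, casting results in integer overflow", roundedUp)
	}
	return roundedUpInt, nil
}

func main() {
	huge := int64(math.MaxInt64) // worst-case request size in bytes

	fmt.Println(oldRoundUp(huge, GiB)) // wraps around and prints a negative count
	fmt.Println(newRoundUp(huge, GiB)) // 8589934592

	// The int-returning variant errors where int is 32 bits rather than
	// handing the caller a truncated size; on 64-bit platforms this
	// particular value still fits.
	fmt.Println(roundUpToGiBInt(huge))
}

Callers such as the AWS EBS, Azure disk, Cinder, Quobyte and StorageOS provisioners in this patch propagate that error to the caller instead of provisioning a size derived from a truncated cast.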