diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go
index 04410917fac9..314dec3095c4 100644
--- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go
+++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go
@@ -531,6 +531,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 			ExperimentalQOSReserved:               *experimentalQOSReserved,
 			ExperimentalCPUManagerPolicy:          s.CPUManagerPolicy,
 			ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration,
+			EnforceCPULimits:                      s.CPUCFSQuota,
 		},
 		s.FailSwapOn,
 		devicePluginEnabled,
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go
index dfdcf8d82c84..358576732e76 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go
@@ -106,6 +106,7 @@ type NodeConfig struct {
 	ExperimentalQOSReserved               map[v1.ResourceName]int64
 	ExperimentalCPUManagerPolicy          string
 	ExperimentalCPUManagerReconcilePeriod time.Duration
+	EnforceCPULimits                      bool
 }
 
 type NodeAllocatableConfig struct {
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go
index 6c6c7068172d..2da9c0b09517 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go
@@ -300,6 +300,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 			qosContainersInfo: cm.GetQOSContainersInfo(),
 			subsystems:        cm.subsystems,
 			cgroupManager:     cm.cgroupManager,
+			enforceCPULimits:  cm.EnforceCPULimits,
 		}
 	}
 	return &podContainerManagerNoop{
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go
index 935fb6c8060e..d04128edd719 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go
@@ -103,7 +103,7 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
 	// sum requests and limits.
 	reqs, limits := resource.PodRequestsAndLimits(pod)
 
@@ -146,6 +146,11 @@ func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
 		}
 	}
 
+	// quota is not capped when cfs quota is disabled
+	if !enforceCPULimits {
+		cpuQuota = int64(-1)
+	}
+
 	// determine the qos class
 	qosClass := v1qos.GetPodQOS(pod)
 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux_test.go
index d92de4322c76..30894e9fdfab 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux_test.go
@@ -57,10 +57,12 @@ func TestResourceConfigForPod(t *testing.T) {
 	guaranteedShares := MilliCPUToShares(100)
 	guaranteedQuota, guaranteedPeriod := MilliCPUToQuota(100)
 	memoryQuantity = resource.MustParse("100Mi")
+	cpuNoLimit := int64(-1)
 	guaranteedMemory := memoryQuantity.Value()
 	testCases := map[string]struct {
-		pod      *v1.Pod
-		expected *ResourceConfig
+		pod              *v1.Pod
+		expected         *ResourceConfig
+		enforceCPULimits bool
 	}{
 		"besteffort": {
 			pod: &v1.Pod{
@@ -72,7 +74,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &minShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &minShares},
 		},
 		"burstable-no-limits": {
 			pod: &v1.Pod{
@@ -84,7 +87,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares},
 		},
 		"burstable-with-limits": {
 			pod: &v1.Pod{
@@ -96,7 +100,21 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+		},
+		"burstable-with-limits-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
 		},
 		"burstable-partial-limits": {
 			pod: &v1.Pod{
@@ -111,7 +129,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstablePartialShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
 		},
 		"guaranteed": {
 			pod: &v1.Pod{
@@ -123,11 +142,25 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+		},
+		"guaranteed-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
 		},
 	}
 	for testName, testCase := range testCases {
-		actual := ResourceConfigForPod(testCase.pod)
+		actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits)
 		if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
 			t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
 		}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go
index b572f3456f3f..ee3ed91d5576 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go
@@ -43,7 +43,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool) *ResourceConfig {
 	return nil
 }
 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go
index e62d192891d4..4e635d46d497 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go
@@ -45,6 +45,8 @@ type podContainerManagerImpl struct {
 	// cgroupManager is the cgroup Manager Object responsible for managing all
 	// pod cgroups.
 	cgroupManager CgroupManager
+	// enforceCPULimits controls whether cfs quota is enforced or not
+	enforceCPULimits bool
 }
 
 // Make sure that podContainerManagerImpl implements the PodContainerManager interface
@@ -75,7 +77,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 	// Create the pod container
 	containerConfig := &CgroupConfig{
 		Name:               podContainerName,
-		ResourceParameters: ResourceConfigForPod(pod),
+		ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits),
 	}
 	if err := m.cgroupManager.Create(containerConfig); err != nil {
 		return fmt.Errorf("failed to create container for %v : %v", podContainerName, err)
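
For review context, here is a minimal standalone sketch of the behavior the patch introduces. This is not the vendored code: the `ResourceConfig` stand-in and the quota math are simplified from `pkg/kubelet/cm`, and the helper names (`milliCPUToQuota`, `cpuQuotaForPod`) are illustrative only, assuming the default 100ms CFS period.

```go
package main

import "fmt"

// ResourceConfig stands in for the vendored struct in pkg/kubelet/cm,
// which likewise uses pointer fields so "unset" stays distinguishable.
type ResourceConfig struct {
	CpuQuota  *int64
	CpuPeriod *int64
}

const (
	quotaPeriod    = 100000 // default cpu.cfs_period_us: 100ms in microseconds
	minQuotaPeriod = 1000   // kernel-imposed minimum quota
)

// milliCPUToQuota approximates the upstream MilliCPUToQuota helper:
// quota = (milliCPU * period) / 1000.
func milliCPUToQuota(milliCPU int64) (quota, period int64) {
	if milliCPU == 0 {
		return
	}
	period = quotaPeriod
	quota = (milliCPU * quotaPeriod) / 1000
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return
}

// cpuQuotaForPod shows the new switch in isolation: when enforceCPULimits
// is false, the quota derived from the pod's CPU limit is replaced by -1,
// which the cgroup layer writes as an unlimited cpu.cfs_quota_us.
// CPU shares (derived from requests) are unaffected.
func cpuQuotaForPod(limitMilliCPU int64, enforceCPULimits bool) *ResourceConfig {
	cpuQuota, cpuPeriod := milliCPUToQuota(limitMilliCPU)

	// quota is not capped when cfs quota is disabled
	if !enforceCPULimits {
		cpuQuota = int64(-1)
	}

	return &ResourceConfig{CpuQuota: &cpuQuota, CpuPeriod: &cpuPeriod}
}

func main() {
	enforced := cpuQuotaForPod(200, true)
	relaxed := cpuQuotaForPod(200, false)
	fmt.Printf("enforced: quota=%d period=%d\n", *enforced.CpuQuota, *enforced.CpuPeriod) // quota=20000 period=100000
	fmt.Printf("relaxed:  quota=%d period=%d\n", *relaxed.CpuQuota, *relaxed.CpuPeriod)   // quota=-1 period=100000
}
```

In kubelet terms this corresponds to running with CFS quota enforcement disabled: `run()` copies `s.CPUCFSQuota` into `NodeConfig.EnforceCPULimits`, the pod container manager forwards it to `ResourceConfigForPod`, and the pod cgroup keeps its CPU shares while its CFS quota is left uncapped.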