From c8b74946e369bef21d045022ae23cea0c1569d78 Mon Sep 17 00:00:00 2001
From: ravisantoshgudimetla
Date: Wed, 25 Jul 2018 16:07:31 -0400
Subject: [PATCH] UPSTREAM: 64860:checkLimitsForResolvConf for the pod create
 and update events instead of checking period

---
 .../k8s.io/kubernetes/pkg/kubelet/kubelet.go  | 13 +++--
 .../kubernetes/pkg/kubelet/kubelet_test.go    | 50 +++++++++++++++++++
 .../kubernetes/pkg/kubelet/network/dns/dns.go |  8 +--
 3 files changed, 62 insertions(+), 9 deletions(-)

diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
index bb78ffb24b05..e0d67843b77d 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
@@ -1374,11 +1374,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	// handled by pod workers).
 	go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop)
 
-	// Start gorouting responsible for checking limits in resolv.conf
-	if kl.dnsConfigurer.ResolverConfig != "" {
-		go wait.Until(func() { kl.dnsConfigurer.CheckLimitsForResolvConf() }, 30*time.Second, wait.NeverStop)
-	}
-
 	// Start component sync loops.
 	kl.statusManager.Start()
 	kl.probeManager.Start()
@@ -1968,6 +1963,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
 	start := kl.clock.Now()
 	sort.Sort(sliceutils.PodsByCreationTime(pods))
 	for _, pod := range pods {
+		// Responsible for checking limits in resolv.conf
+		if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
+			kl.dnsConfigurer.CheckLimitsForResolvConf()
+		}
 		existingPods := kl.podManager.GetPods()
 		// Always add the pod to the pod manager. Kubelet relies on the pod
 		// manager as the source of truth for the desired state. If a pod does
@@ -2005,6 +2004,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
 func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
 	start := kl.clock.Now()
 	for _, pod := range pods {
+		// Responsible for checking limits in resolv.conf
+		if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
+			kl.dnsConfigurer.CheckLimitsForResolvConf()
+		}
 		kl.podManager.UpdatePod(pod)
 		if kubepod.IsMirrorPod(pod) {
 			kl.handleMirrorPod(pod, start)
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go
index fd18b128331b..7ef5ffa4a5c4 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go
@@ -51,6 +51,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/logs"
+	"k8s.io/kubernetes/pkg/kubelet/network/dns"
 	"k8s.io/kubernetes/pkg/kubelet/pleg"
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
@@ -481,6 +482,16 @@ func TestHandlePortConflicts(t *testing.T) {
 		},
 	}}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
@@ -517,6 +528,15 @@ func TestHandleHostNameConflicts(t *testing.T) {
 			},
 		},
 	}}
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
 
 	// default NodeName in test is 127.0.0.1
 	pods := []*v1.Pod{
@@ -551,6 +571,15 @@ func TestHandleNodeSelector(t *testing.T) {
 		},
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
 		podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
@@ -582,6 +611,16 @@ func TestHandleMemExceeded(t *testing.T) {
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName),
 		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
@@ -589,6 +628,7 @@ func TestHandleMemExceeded(t *testing.T) {
 			},
 		}}},
 	}
+
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
 		podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
@@ -668,6 +708,16 @@ func TestHandlePluginResources(t *testing.T) {
 	kl.admitHandlers = lifecycle.PodAdmitHandlers{}
 	kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	// pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
 	// adjusts node.allocatableResource for this resource to a sufficient value.
 	fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go
index a3e42358c7ba..dd414f6029dc 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go
@@ -156,7 +156,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
 	f, err := os.Open(c.ResolverConfig)
 	if err != nil {
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
-		glog.Error("CheckLimitsForResolvConf: " + err.Error())
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error())
 		return
 	}
 	defer f.Close()
@@ -164,7 +164,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
 	_, hostSearch, _, err := parseResolvConf(f)
 	if err != nil {
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
-		glog.Error("CheckLimitsForResolvConf: " + err.Error())
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error())
 		return
 	}
 
@@ -177,14 +177,14 @@ func (c *Configurer) CheckLimitsForResolvConf() {
 	if len(hostSearch) > domainCountLimit {
 		log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit)
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
-		glog.Error("CheckLimitsForResolvConf: " + log)
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + log)
 		return
 	}
 
 	if len(strings.Join(hostSearch, " ")) > validation.MaxDNSSearchListChars {
 		log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, validation.MaxDNSSearchListChars)
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
-		glog.Error("CheckLimitsForResolvConf: " + log)
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + log)
 		return
 	}
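
Reviewer note (not part of the patch): the sketch below is a minimal, self-contained illustration of the pattern the patch applies, namely replacing a periodic wait.Until goroutine with an event-driven check performed on each pod add/update, guarded by a nil check so kubelets started without a resolver config skip it. The names resolvConfChecker, kubeletLike, and handlePodAdditions are illustrative stand-ins, not the real kubelet types.

// Illustrative sketch only: event-driven resolv.conf limit checking.
package main

import "fmt"

// resolvConfChecker stands in for dns.Configurer: it knows the resolv.conf
// path and can validate its search-line limits.
type resolvConfChecker struct {
	ResolverConfig string
}

// CheckLimitsForResolvConf stands in for the real check; here it only logs.
func (c *resolvConfChecker) CheckLimitsForResolvConf() {
	fmt.Printf("checking search-line limits in %s\n", c.ResolverConfig)
}

// kubeletLike stands in for the Kubelet struct, carrying only the field
// relevant to this sketch.
type kubeletLike struct {
	dnsConfigurer *resolvConfChecker
}

// handlePodAdditions mirrors the patched HandlePodAdditions/HandlePodUpdates:
// the check runs per pod event and is skipped when no resolver config is set,
// instead of running every 30 seconds in a background goroutine.
func (kl *kubeletLike) handlePodAdditions(pods []string) {
	for _, pod := range pods {
		if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
			kl.dnsConfigurer.CheckLimitsForResolvConf()
		}
		fmt.Printf("admitting pod %s\n", pod)
	}
}

func main() {
	kl := &kubeletLike{dnsConfigurer: &resolvConfChecker{ResolverConfig: "/etc/resolv.conf"}}
	kl.handlePodAdditions([]string{"newpod", "oldpod"})
}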