From 8bd8e3b32a82b19d603abeed5064be5d0be6614f Mon Sep 17 00:00:00 2001
From: juanvallejo
Date: Thu, 22 Feb 2018 14:34:16 -0500
Subject: [PATCH 1/3] tags existing deployment nodes as "found"

---
 pkg/oc/graph/kubegraph/edges.go       | 2 ++
 pkg/oc/graph/kubegraph/nodes/nodes.go | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/pkg/oc/graph/kubegraph/edges.go b/pkg/oc/graph/kubegraph/edges.go
index 229c2abca7d8..f744291c2be6 100644
--- a/pkg/oc/graph/kubegraph/edges.go
+++ b/pkg/oc/graph/kubegraph/edges.go
@@ -258,6 +258,8 @@ func AddHPAScaleRefEdges(g osgraph.Graph) {
 			syntheticNode = appsgraph.FindOrCreateSyntheticDeploymentConfigNode(g, &appsapi.DeploymentConfig{ObjectMeta: syntheticMeta})
 		case r == extensions.Resource("deployments"):
 			syntheticNode = kubegraph.FindOrCreateSyntheticDeploymentNode(g, &extensions.Deployment{ObjectMeta: syntheticMeta})
+		case r == extensions.Resource("replicasets"):
+			syntheticNode = kubegraph.FindOrCreateSyntheticReplicaSetNode(g, &extensions.ReplicaSet{ObjectMeta: syntheticMeta})
 		default:
 			continue
 		}
diff --git a/pkg/oc/graph/kubegraph/nodes/nodes.go b/pkg/oc/graph/kubegraph/nodes/nodes.go
index 03be6b246e6c..9fefa9dfc41a 100644
--- a/pkg/oc/graph/kubegraph/nodes/nodes.go
+++ b/pkg/oc/graph/kubegraph/nodes/nodes.go
@@ -229,7 +229,7 @@ func EnsureStatefulSetNode(g osgraph.MutableUniqueGraph, statefulSet *kapps.Stat
 	node := osgraph.EnsureUnique(g,
 		nodeName,
 		func(node osgraph.Node) graph.Node {
-			return &StatefulSetNode{node, statefulSet, false}
+			return &StatefulSetNode{node, statefulSet, true}
 		},
 	).(*StatefulSetNode)
 
@@ -259,7 +259,7 @@ func EnsureDeploymentNode(g osgraph.MutableUniqueGraph, deployment *extensions.D
 	node := osgraph.EnsureUnique(g,
 		nodeName,
 		func(node osgraph.Node) graph.Node {
-			return &DeploymentNode{Node: node, Deployment: deployment}
+			return &DeploymentNode{Node: node, Deployment: deployment, IsFound: true}
 		},
 	).(*DeploymentNode)
 

From 98f27ebc7f049a39dfe9137572f83f7de9b486d5 Mon Sep 17 00:00:00 2001
From: juanvallejo
Date: Fri, 23 Feb 2018 18:43:33 -0500
Subject: [PATCH 2/3] add daemonsets to status graph

---
 pkg/oc/cli/describe/projectstatus.go          | 94 +++++++++++++++++++
 .../graph/genericgraph/graphview/daemonset.go | 66 +++++++++++++
 pkg/oc/graph/kubegraph/edges.go               |  2 +
 3 files changed, 162 insertions(+)
 create mode 100644 pkg/oc/graph/genericgraph/graphview/daemonset.go

diff --git a/pkg/oc/cli/describe/projectstatus.go b/pkg/oc/cli/describe/projectstatus.go
index b173d523496e..bc0fbb7d5dcd 100644
--- a/pkg/oc/cli/describe/projectstatus.go
+++ b/pkg/oc/cli/describe/projectstatus.go
@@ -98,6 +98,7 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set
 		&horizontalPodAutoscalerLoader{namespace: namespace, lister: d.KubeClient.Autoscaling()},
 		&deploymentLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
 		&replicasetLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
+		&daemonsetLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
 		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
 		// then remove errors.TolerateNotFoundError method.
 		&bcLoader{namespace: namespace, lister: d.BuildClient},
@@ -203,6 +204,9 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 	standaloneDeployments, coveredByDeployments := graphview.AllDeployments(g, coveredNodes)
 	coveredNodes.Insert(coveredByDeployments.List()...)
 
+	standaloneStatefulSets, coveredByStatefulSets := graphview.AllStatefulSets(g, coveredNodes)
+	coveredNodes.Insert(coveredByStatefulSets.List()...)
+
 	standaloneRCs, coveredByRCs := graphview.AllReplicationControllers(g, coveredNodes)
 	coveredNodes.Insert(coveredByRCs.List()...)
 
@@ -212,6 +216,9 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 	standaloneImages, coveredByImages := graphview.AllImagePipelinesFromBuildConfig(g, coveredNodes)
 	coveredNodes.Insert(coveredByImages.List()...)
 
+	standaloneDaemonSets, coveredByDaemonSets := graphview.AllDaemonSets(g, coveredNodes)
+	coveredNodes.Insert(coveredByDaemonSets.List()...)
+
 	standalonePods, coveredByPods := graphview.AllPods(g, coveredNodes)
 	coveredNodes.Insert(coveredByPods.List()...)
 
@@ -318,6 +325,15 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 			})...)
 	}
 
+	for _, standaloneStatefulSet := range standaloneStatefulSets {
+		if !standaloneStatefulSet.StatefulSet.Found() {
+			continue
+		}
+
+		fmt.Fprintln(out)
+		printLines(out, indent, 0, describeStatefulSetInServiceGroup(f, standaloneStatefulSet)...)
+	}
+
 	for _, standaloneImage := range standaloneImages {
 		fmt.Fprintln(out)
 		lines := describeStandaloneBuildGroup(f, standaloneImage, namespace)
@@ -343,6 +359,15 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 		printLines(out, indent, 0, describeRSInServiceGroup(f, standaloneRS.RS)...)
 	}
 
+	for _, standaloneDaemonSet := range standaloneDaemonSets {
+		if !standaloneDaemonSet.DaemonSet.Found() {
+			continue
+		}
+
+		fmt.Fprintln(out)
+		printLines(out, indent, 0, describeDaemonSetInServiceGroup(f, standaloneDaemonSet)...)
+	}
+
 	monopods, err := filterBoringPods(standalonePods)
 	if err != nil {
 		return err
@@ -586,6 +611,9 @@ func (f namespacedFormatter) ResourceName(obj interface{}) string {
 	case *kubegraph.PersistentVolumeClaimNode:
 		return namespaceNameWithType("pvc", t.PersistentVolumeClaim.Name, t.PersistentVolumeClaim.Namespace, f.currentNamespace, f.hideNamespace)
 
+	case *kubegraph.DaemonSetNode:
+		return namespaceNameWithType("daemonset", t.DaemonSet.Name, t.DaemonSet.Namespace, f.currentNamespace, f.hideNamespace)
+
 	case *imagegraph.ImageStreamNode:
 		return namespaceNameWithType("is", t.ImageStream.Name, t.ImageStream.Namespace, f.currentNamespace, f.hideNamespace)
 	case *imagegraph.ImageStreamTagNode:
@@ -723,6 +751,42 @@ func describeStatefulSetInServiceGroup(f formatter, node graphview.StatefulSet)
 	return lines
 }
 
+func describeDaemonSetInServiceGroup(f formatter, node graphview.DaemonSet) []string {
+	local := namespacedFormatter{currentNamespace: node.DaemonSet.DaemonSet.Namespace}
+	includeLastPass := false
+
+	if len(node.Images) == 1 {
+		format := "%s manages %s"
+		lines := []string{fmt.Sprintf(format, f.ResourceName(node.DaemonSet), describeImageInPipeline(local, node.Images[0], node.DaemonSet.DaemonSet.Namespace))}
+		if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") {
+			segments := strings.SplitN(lines[0], " <- ", 2)
+			lines[0] = segments[0] + " <-"
+			lines = append(lines, segments[1])
+		}
+
+		lines = append(lines, indentLines("  ", describeAdditionalBuildDetail(node.Images[0].Build, node.Images[0].LastSuccessfulBuild, node.Images[0].LastUnsuccessfulBuild, node.Images[0].ActiveBuilds, node.Images[0].DestinationResolved, includeLastPass)...)...)
+		lines = append(lines, describeDaemonSetStatus(node.DaemonSet.DaemonSet))
+		return lines
+	}
+
+	images := []string{}
+	for _, container := range node.DaemonSet.DaemonSet.Spec.Template.Spec.Containers {
+		images = append(images, container.Image)
+	}
+	imagesWithoutTriggers := ""
+	if len(node.Images) == 0 {
+		imagesWithoutTriggers = strings.Join(images, ",")
+	}
+	format := "%s manages %s"
+	lines := []string{fmt.Sprintf(format, f.ResourceName(node.DaemonSet), imagesWithoutTriggers)}
+	for _, image := range node.Images {
+		lines = append(lines, describeImageInPipeline(local, image, node.DaemonSet.DaemonSet.Namespace))
+		lines = append(lines, indentLines("  ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...)
+	}
+	lines = append(lines, describeDaemonSetStatus(node.DaemonSet.DaemonSet))
+	return lines
+}
+
 func describeRCInServiceGroup(f formatter, rcNode *kubegraph.ReplicationControllerNode) []string {
 	if rcNode.ReplicationController.Spec.Template == nil {
 		return []string{}
@@ -1282,6 +1346,12 @@ func describeStatefulSetStatus(p *kapps.StatefulSet) string {
 	return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(p.Status.Replicas), int32(p.Status.Replicas), int32(p.Spec.Replicas), false, 0))
 }
 
+func describeDaemonSetStatus(ds *kapisext.DaemonSet) string {
+	timeAt := strings.ToLower(formatRelativeTime(ds.CreationTimestamp.Time))
+	daemonSetGeneration := ds.Generation
+	return fmt.Sprintf("generation #%d running for %s%s", daemonSetGeneration, timeAt, describePodSummaryInline(ds.Status.NumberReady, ds.Status.NumberAvailable, ds.Status.DesiredNumberScheduled, false, 0))
+}
+
 func describeRCStatus(rc *kapi.ReplicationController) string {
 	timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time))
 	return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, false, 0))
@@ -1590,6 +1660,30 @@ func (l *deploymentLoader) AddToGraph(g osgraph.Graph) error {
 	return nil
 }
 
+type daemonsetLoader struct {
+	namespace string
+	lister    kapisextclient.DaemonSetsGetter
+	items     []kapisext.DaemonSet
+}
+
+func (l *daemonsetLoader) Load() error {
+	list, err := l.lister.DaemonSets(l.namespace).List(metav1.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	l.items = list.Items
+	return nil
+}
+
+func (l *daemonsetLoader) AddToGraph(g osgraph.Graph) error {
+	for i := range l.items {
+		kubegraph.EnsureDaemonSetNode(g, &l.items[i])
+	}
+
+	return nil
+}
+
 type replicasetLoader struct {
 	namespace string
 	lister    kapisextclient.ReplicaSetsGetter
diff --git a/pkg/oc/graph/genericgraph/graphview/daemonset.go b/pkg/oc/graph/genericgraph/graphview/daemonset.go
new file mode 100644
index 000000000000..1f8a82dcec51
--- /dev/null
+++ b/pkg/oc/graph/genericgraph/graphview/daemonset.go
@@ -0,0 +1,66 @@
+package graphview
+
+import (
+	appsedges "github.com/openshift/origin/pkg/oc/graph/appsgraph"
+	osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph"
+	kubeedges "github.com/openshift/origin/pkg/oc/graph/kubegraph"
+	kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes"
+)
+
+type DaemonSet struct {
+	DaemonSet *kubegraph.DaemonSetNode
+
+	OwnedPods   []*kubegraph.PodNode
+	CreatedPods []*kubegraph.PodNode
+
+	Images []ImagePipeline
+}
+
+// AllDaemonSets returns all the DaemonSets that aren't in the excludes set and the set of covered NodeIDs
+func AllDaemonSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]DaemonSet, IntSet) {
+	covered := IntSet{}
+	views := []DaemonSet{}
+
+	for _, uncastNode := range g.NodesByKind(kubegraph.DaemonSetNodeKind) {
+		if excludeNodeIDs.Has(uncastNode.ID()) {
+			continue
+		}
+
+		view, covers := NewDaemonSet(g, uncastNode.(*kubegraph.DaemonSetNode))
+		covered.Insert(covers.List()...)
+		views = append(views, view)
+	}
+
+	return views, covered
+}
+
+// NewDaemonSet returns the DaemonSet and a set of all the NodeIDs covered by the DaemonSet
+func NewDaemonSet(g osgraph.Graph, node *kubegraph.DaemonSetNode) (DaemonSet, IntSet) {
+	covered := IntSet{}
+	covered.Insert(node.ID())
+
+	view := DaemonSet{}
+	view.DaemonSet = node
+
+	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) {
+		podNode := uncastPodNode.(*kubegraph.PodNode)
+		covered.Insert(podNode.ID())
+		view.OwnedPods = append(view.OwnedPods, podNode)
+	}
+
+	for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.TriggersDeploymentEdgeKind) {
+		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation))
+		covered.Insert(covers.List()...)
+		view.Images = append(view.Images, imagePipeline)
+	}
+
+	// for each image that we use, create an image pipeline and add it to the list
+	for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsedges.UsedInDeploymentEdgeKind) {
+		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation))
+
+		covered.Insert(covers.List()...)
+		view.Images = append(view.Images, imagePipeline)
+	}
+
+	return view, covered
+}
diff --git a/pkg/oc/graph/kubegraph/edges.go b/pkg/oc/graph/kubegraph/edges.go
index f744291c2be6..c98b6d8aec81 100644
--- a/pkg/oc/graph/kubegraph/edges.go
+++ b/pkg/oc/graph/kubegraph/edges.go
@@ -137,6 +137,8 @@ func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) {
 		case *kubegraph.StatefulSetNode:
 			// TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments)
 			AddManagedByControllerPodEdges(g, cast, cast.StatefulSet.Namespace, cast.StatefulSet.Spec.Selector.MatchLabels)
+		case *kubegraph.DaemonSetNode:
+			AddManagedByControllerPodEdges(g, cast, cast.DaemonSet.Namespace, cast.DaemonSet.Spec.Selector.MatchLabels)
 		}
 	}
 }

From 6c7059f873c6b27c22b1d6bb15f23401f4636cb3 Mon Sep 17 00:00:00 2001
From: juanvallejo
Date: Fri, 23 Feb 2018 19:31:25 -0500
Subject: [PATCH 3/3] add tests

---
 pkg/oc/cli/describe/projectstatus_test.go     | 26 ++++++
 .../genericgraph/test/hpa-with-scale-ref.yaml | 83 +++++++++++++++++++
 .../test/rollingupdate-daemonset.yaml         | 28 +++++++
 3 files changed, 137 insertions(+)
 create mode 100644 pkg/oc/graph/genericgraph/test/hpa-with-scale-ref.yaml
 create mode 100644 pkg/oc/graph/genericgraph/test/rollingupdate-daemonset.yaml

diff --git a/pkg/oc/cli/describe/projectstatus_test.go b/pkg/oc/cli/describe/projectstatus_test.go
index 29ea54e9a9f6..fb11d85e0cb3 100644
--- a/pkg/oc/cli/describe/projectstatus_test.go
+++ b/pkg/oc/cli/describe/projectstatus_test.go
@@ -433,6 +433,32 @@ func TestProjectStatus(t *testing.T) {
 			},
 			Time: mustParseTime("2016-04-07T04:12:25Z"),
 		},
+		"standalone daemonset": {
+			File: "rollingupdate-daemonset.yaml",
+			Extra: []runtime.Object{
+				&projectapi.Project{
+					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
+				},
+			},
+			ErrFn: func(err error) bool { return err == nil },
+			Contains: []string{
+				"daemonset/bind manages gcr.io/google-containers/pause:2.0",
+				"generation #0 running for about a minute",
+			},
+			Time: mustParseTime("2016-04-07T04:12:25Z"),
+		},
+		"hpa non-missing scaleref": {
+			File: "hpa-with-scale-ref.yaml",
+			Extra: []runtime.Object{
+				&projectapi.Project{
+					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
+				},
+			},
+			ErrFn: func(err error) bool { return err == nil },
+			Contains: []string{
+				"deployment/ruby-deploy deploys istag/ruby-deploy:latest",
+			},
+		},
 	}
 	oldTimeFn := timeNowFn
 	defer func() { timeNowFn = oldTimeFn }()
diff --git a/pkg/oc/graph/genericgraph/test/hpa-with-scale-ref.yaml b/pkg/oc/graph/genericgraph/test/hpa-with-scale-ref.yaml
new file mode 100644
index 000000000000..0d8758dee32c
--- /dev/null
+++ b/pkg/oc/graph/genericgraph/test/hpa-with-scale-ref.yaml
@@ -0,0 +1,83 @@
+apiVersion: v1
+items:
+- apiVersion: extensions/v1beta1
+  kind: Deployment
+  metadata:
+    annotations:
+      deployment.kubernetes.io/revision: "1"
+      image.openshift.io/triggers: '[{"from":{"kind":"ImageStreamTag","name":"ruby-deploy:latest"},"fieldPath":"spec.template.spec.containers[?(@.name==\"ruby-deploy\")].image"}]'
+      openshift.io/generated-by: OpenShiftNewApp
+    generation: 1
+    labels:
+      app: ruby-deploy
+    name: ruby-deploy
+  spec:
+    progressDeadlineSeconds: 600
+    replicas: 1
+    revisionHistoryLimit: 10
+    selector:
+      matchLabels:
+        app: ruby-deploy
+    strategy:
+      rollingUpdate:
+        maxSurge: 25%
+        maxUnavailable: 25%
+      type: RollingUpdate
+    template:
+      metadata:
+        creationTimestamp: null
+        labels:
+          app: ruby-deploy
+      spec:
+        containers:
+        - image: busybox@sha256:4cee1979ba0bf7db9fc5d28fb7b798ca69ae95a47c5fecf46327720df4ff352d
+          imagePullPolicy: IfNotPresent
+          name: ruby-deploy
+          ports:
+          - containerPort: 8080
+            protocol: TCP
+          resources: {}
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+        dnsPolicy: ClusterFirst
+        restartPolicy: Always
+        schedulerName: default-scheduler
+        securityContext: {}
+        terminationGracePeriodSeconds: 30
+  status:
+    conditions:
+    - lastTransitionTime: 2018-02-24T00:51:47Z
+      lastUpdateTime: 2018-02-24T00:51:47Z
+      message: Deployment does not have minimum availability.
+      reason: MinimumReplicasUnavailable
+      status: "False"
+      type: Available
+    - lastTransitionTime: 2018-02-24T00:51:47Z
+      lastUpdateTime: 2018-02-24T00:51:47Z
+      message: ReplicaSet "ruby-deploy-599994c49b" is progressing.
+      reason: ReplicaSetUpdated
+      status: "True"
+      type: Progressing
+    observedGeneration: 1
+    replicas: 1
+    unavailableReplicas: 1
+    updatedReplicas: 1
+- apiVersion: autoscaling/v1
+  kind: HorizontalPodAutoscaler
+  metadata:
+    name: ruby-deploy
+  spec:
+    maxReplicas: 1
+    minReplicas: 1
+    scaleTargetRef:
+      apiVersion: extensions/v1beta1
+      kind: Deployment
+      name: ruby-deploy
+    targetCPUUtilizationPercentage: 80
+  status:
+    currentReplicas: 0
+    desiredReplicas: 0
+kind: List
+metadata:
+  resourceVersion: ""
+  selfLink: ""
diff --git a/pkg/oc/graph/genericgraph/test/rollingupdate-daemonset.yaml b/pkg/oc/graph/genericgraph/test/rollingupdate-daemonset.yaml
new file mode 100644
index 000000000000..56e41a21fc64
--- /dev/null
+++ b/pkg/oc/graph/genericgraph/test/rollingupdate-daemonset.yaml
@@ -0,0 +1,28 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: bind
+  creationTimestamp: 2016-04-07T04:11:25Z
+spec:
+  updateStrategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 10%
+  template:
+    metadata:
+      labels:
+        service: bind
+    spec:
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: "service"
+                operator: "In"
+                values: ["bind"]
+            topologyKey: "kubernetes.io/hostname"
+            namespaces: []
+      containers:
+      - name: kubernetes-pause
+        image: gcr.io/google-containers/pause:2.0
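
Reviewer note (not part of the series): the "found" marker that patch 1 introduces is easiest to see in isolation. Below is a minimal, self-contained Go sketch of the pattern; the graph, node, ensure, and findOrCreateSynthetic names are simplified stand-ins for osgraph.MutableUniqueGraph, DeploymentNode, EnsureDeploymentNode, and FindOrCreateSyntheticDeploymentNode, not the real origin APIs.

package main

import "fmt"

// node is a simplified stand-in for DeploymentNode in
// pkg/oc/graph/kubegraph/nodes; only the found marker matters here.
type node struct {
	name  string
	found bool // true when the object was actually listed from the API server
}

// graph stands in for osgraph.MutableUniqueGraph: at most one node per name.
type graph map[string]*node

// ensure mirrors EnsureDeploymentNode after patch 1: a node built from a
// listed object is created with found=true. The loaders run before any of
// the edge functions, so real objects are always registered first.
func (g graph) ensure(name string) *node {
	if _, ok := g[name]; !ok {
		g[name] = &node{name: name, found: true}
	}
	return g[name]
}

// findOrCreateSynthetic mirrors FindOrCreateSyntheticDeploymentNode as used
// by AddHPAScaleRefEdges: a scale target that was never listed gets a
// placeholder with found=false, so `oc status` can flag the dangling ref.
func (g graph) findOrCreateSynthetic(name string) *node {
	if _, ok := g[name]; !ok {
		g[name] = &node{name: name, found: false}
	}
	return g[name]
}

func main() {
	g := graph{}
	g.ensure("deployment/ruby-deploy") // registered by the loader

	fmt.Println(g.findOrCreateSynthetic("deployment/ruby-deploy").found) // true
	fmt.Println(g.findOrCreateSynthetic("deployment/ghost").found)       // false
}

Before patch 1, DeploymentNode never set IsFound, so a deployment the loader really did list was indistinguishable from a synthetic placeholder and a live HPA scale target could be reported as missing; the "hpa non-missing scaleref" case in patch 3 pins the fixed behavior.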