Merge pull request #18723 from juanvallejo/jvallejo/mark-found-deployment-nodes

Automatic merge from submit-queue.

tags existing deployment nodes as "found"

Fixes https://bugzilla.redhat.com/show_bug.cgi?id=1544183#c3
Fixes https://bugzilla.redhat.com/show_bug.cgi?id=1540560#c7
Followup to: #18579

cc @soltysh
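
The core of the fix is the Found() marker on graph nodes: placeholders created for dangling references report false, and the describer now skips them instead of printing broken entries. A minimal sketch of that guard, with a simplified node type standing in for origin's graphview structs:

type DeploymentNode struct {
	found bool
}

// Found reports whether the real object was loaded, as opposed to the
// node being a synthetic placeholder for a dangling reference.
func (n *DeploymentNode) Found() bool { return n.found }

func describeStandalone(nodes []*DeploymentNode) {
	for _, node := range nodes {
		if !node.Found() {
			continue // synthetic placeholder; nothing real to describe
		}
		// render the node here
	}
}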
openshift-merge-robot authored Feb 26, 2018
2 parents a1c2561 + 6c7059f commit 1e29a56
Showing 7 changed files with 303 additions and 2 deletions.
94 changes: 94 additions & 0 deletions pkg/oc/cli/describe/projectstatus.go
@@ -98,6 +98,7 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set
		&horizontalPodAutoscalerLoader{namespace: namespace, lister: d.KubeClient.Autoscaling()},
		&deploymentLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
		&replicasetLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
		&daemonsetLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
		// then remove errors.TolerateNotFoundError method.
		&bcLoader{namespace: namespace, lister: d.BuildClient},
@@ -203,6 +204,9 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
	standaloneDeployments, coveredByDeployments := graphview.AllDeployments(g, coveredNodes)
	coveredNodes.Insert(coveredByDeployments.List()...)

	standaloneStatefulSets, coveredByStatefulSets := graphview.AllStatefulSets(g, coveredNodes)
	coveredNodes.Insert(coveredByStatefulSets.List()...)

	standaloneRCs, coveredByRCs := graphview.AllReplicationControllers(g, coveredNodes)
	coveredNodes.Insert(coveredByRCs.List()...)

@@ -212,6 +216,9 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
	standaloneImages, coveredByImages := graphview.AllImagePipelinesFromBuildConfig(g, coveredNodes)
	coveredNodes.Insert(coveredByImages.List()...)

	standaloneDaemonSets, coveredByDaemonSets := graphview.AllDaemonSets(g, coveredNodes)
	coveredNodes.Insert(coveredByDaemonSets.List()...)

	standalonePods, coveredByPods := graphview.AllPods(g, coveredNodes)
	coveredNodes.Insert(coveredByPods.List()...)

@@ -318,6 +325,15 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
		})...)
	}

	for _, standaloneStatefulSet := range standaloneStatefulSets {
		if !standaloneStatefulSet.StatefulSet.Found() {
			continue
		}

		fmt.Fprintln(out)
		printLines(out, indent, 0, describeStatefulSetInServiceGroup(f, standaloneStatefulSet)...)
	}

	for _, standaloneImage := range standaloneImages {
		fmt.Fprintln(out)
		lines := describeStandaloneBuildGroup(f, standaloneImage, namespace)
@@ -343,6 +359,15 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
		printLines(out, indent, 0, describeRSInServiceGroup(f, standaloneRS.RS)...)
	}

	for _, standaloneDaemonSet := range standaloneDaemonSets {
		if !standaloneDaemonSet.DaemonSet.Found() {
			continue
		}

		fmt.Fprintln(out)
		printLines(out, indent, 0, describeDaemonSetInServiceGroup(f, standaloneDaemonSet)...)
	}

	monopods, err := filterBoringPods(standalonePods)
	if err != nil {
		return err
@@ -586,6 +611,9 @@ func (f namespacedFormatter) ResourceName(obj interface{}) string {
	case *kubegraph.PersistentVolumeClaimNode:
		return namespaceNameWithType("pvc", t.PersistentVolumeClaim.Name, t.PersistentVolumeClaim.Namespace, f.currentNamespace, f.hideNamespace)

	case *kubegraph.DaemonSetNode:
		return namespaceNameWithType("daemonset", t.DaemonSet.Name, t.DaemonSet.Namespace, f.currentNamespace, f.hideNamespace)

	case *imagegraph.ImageStreamNode:
		return namespaceNameWithType("is", t.ImageStream.Name, t.ImageStream.Namespace, f.currentNamespace, f.hideNamespace)
	case *imagegraph.ImageStreamTagNode:
@@ -723,6 +751,42 @@ func describeStatefulSetInServiceGroup(f formatter, node graphview.StatefulSet)
	return lines
}

func describeDaemonSetInServiceGroup(f formatter, node graphview.DaemonSet) []string {
	local := namespacedFormatter{currentNamespace: node.DaemonSet.DaemonSet.Namespace}
	includeLastPass := false

	if len(node.Images) == 1 {
		format := "%s manages %s %s"
		lines := []string{fmt.Sprintf(format, f.ResourceName(node.DaemonSet), describeImageInPipeline(local, node.Images[0], node.DaemonSet.DaemonSet.Namespace), "")}
		if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") {
			segments := strings.SplitN(lines[0], " <- ", 2)
			lines[0] = segments[0] + " <-"
			lines = append(lines, segments[1])
		}

		lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(node.Images[0].Build, node.Images[0].LastSuccessfulBuild, node.Images[0].LastUnsuccessfulBuild, node.Images[0].ActiveBuilds, node.Images[0].DestinationResolved, includeLastPass)...)...)
		lines = append(lines, describeDaemonSetStatus(node.DaemonSet.DaemonSet))
		return lines
	}

	images := []string{}
	for _, container := range node.DaemonSet.DaemonSet.Spec.Template.Spec.Containers {
		images = append(images, container.Image)
	}
	imagesWithoutTriggers := ""
	if len(node.Images) == 0 {
		imagesWithoutTriggers = strings.Join(images, ",")
	}
	format := "%s manages %s"
	lines := []string{fmt.Sprintf(format, f.ResourceName(node.DaemonSet), imagesWithoutTriggers)}
	for _, image := range node.Images {
		lines = append(lines, describeImageInPipeline(local, image, node.DaemonSet.DaemonSet.Namespace))
		lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...)
	}
	lines = append(lines, describeDaemonSetStatus(node.DaemonSet.DaemonSet))
	return lines
}

func describeRCInServiceGroup(f formatter, rcNode *kubegraph.ReplicationControllerNode) []string {
	if rcNode.ReplicationController.Spec.Template == nil {
		return []string{}
@@ -1282,6 +1346,12 @@ func describeStatefulSetStatus(p *kapps.StatefulSet) string {
	return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(p.Status.Replicas), int32(p.Status.Replicas), int32(p.Spec.Replicas), false, 0))
}

func describeDaemonSetStatus(ds *kapisext.DaemonSet) string {
	timeAt := strings.ToLower(formatRelativeTime(ds.CreationTimestamp.Time))
	generation := ds.Generation
	return fmt.Sprintf("generation #%d running for %s%s", generation, timeAt, describePodSummaryInline(ds.Status.NumberReady, ds.Status.NumberAvailable, ds.Status.DesiredNumberScheduled, false, 0))
}

func describeRCStatus(rc *kapi.ReplicationController) string {
	timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time))
	return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, false, 0))
@@ -1590,6 +1660,30 @@ func (l *deploymentLoader) AddToGraph(g osgraph.Graph) error {
	return nil
}

type daemonsetLoader struct {
	namespace string
	lister    kapisextclient.DaemonSetsGetter
	items     []kapisext.DaemonSet
}

func (l *daemonsetLoader) Load() error {
	list, err := l.lister.DaemonSets(l.namespace).List(metav1.ListOptions{})
	if err != nil {
		return err
	}

	l.items = list.Items
	return nil
}

func (l *daemonsetLoader) AddToGraph(g osgraph.Graph) error {
	for i := range l.items {
		kubegraph.EnsureDaemonSetNode(g, &l.items[i])
	}

	return nil
}

type replicasetLoader struct {
	namespace string
	lister    kapisextclient.ReplicaSetsGetter
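
The new daemonsetLoader follows the same two-phase contract as its siblings above: Load() fetches the list from the API server, then AddToGraph() materializes one node per item. A sketch of how MakeGraph could drive such loaders; the interface name and the sequential driver here are illustrative, not origin's exact orchestration:

type objectLoader interface {
	Load() error
	AddToGraph(g osgraph.Graph) error
}

func loadAll(g osgraph.Graph, loaders []objectLoader) error {
	// first fetch everything, then wire it into the graph
	for _, l := range loaders {
		if err := l.Load(); err != nil {
			return err
		}
	}
	for _, l := range loaders {
		if err := l.AddToGraph(g); err != nil {
			return err
		}
	}
	return nil
}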
26 changes: 26 additions & 0 deletions pkg/oc/cli/describe/projectstatus_test.go
@@ -433,6 +433,32 @@ func TestProjectStatus(t *testing.T) {
			},
			Time: mustParseTime("2016-04-07T04:12:25Z"),
		},
		"standalone daemonset": {
			File: "rollingupdate-daemonset.yaml",
			Extra: []runtime.Object{
				&projectapi.Project{
					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
				},
			},
			ErrFn: func(err error) bool { return err == nil },
			Contains: []string{
				"daemonset/bind manages gcr.io/google-containers/pause:2.0",
				"generation #0 running for about a minute",
			},
			Time: mustParseTime("2016-04-07T04:12:25Z"),
		},
		"hpa non-missing scaleref": {
			File: "hpa-with-scale-ref.yaml",
			Extra: []runtime.Object{
				&projectapi.Project{
					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
				},
			},
			ErrFn: func(err error) bool { return err == nil },
			Contains: []string{
				"deployment/ruby-deploy deploys istag/ruby-deploy:latest",
			},
		},
	}
	oldTimeFn := timeNowFn
	defer func() { timeNowFn = oldTimeFn }()
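
Each case above names a YAML fixture, seeds extra objects into a fake client, and asserts substrings of the rendered status. A stripped-down sketch of that table-driven shape; loadAndDescribe is a hypothetical stand-in for the harness in this file:

func TestStatusSnippets(t *testing.T) {
	cases := map[string]struct {
		file     string
		contains []string
	}{
		"standalone daemonset": {
			file:     "rollingupdate-daemonset.yaml",
			contains: []string{"daemonset/bind manages gcr.io/google-containers/pause:2.0"},
		},
	}
	for name, tc := range cases {
		out, err := loadAndDescribe(t, tc.file) // hypothetical helper
		if err != nil {
			t.Fatalf("%s: unexpected error: %v", name, err)
		}
		for _, want := range tc.contains {
			if !strings.Contains(out, want) {
				t.Errorf("%s: output missing %q", name, want)
			}
		}
	}
}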
66 changes: 66 additions & 0 deletions pkg/oc/graph/genericgraph/graphview/daemonset.go
@@ -0,0 +1,66 @@
package graphview

import (
	appsedges "github.com/openshift/origin/pkg/oc/graph/appsgraph"
	osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph"
	kubeedges "github.com/openshift/origin/pkg/oc/graph/kubegraph"
	kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes"
)

type DaemonSet struct {
	DaemonSet *kubegraph.DaemonSetNode

	OwnedPods   []*kubegraph.PodNode
	CreatedPods []*kubegraph.PodNode

	Images []ImagePipeline
}

// AllDaemonSets returns all the DaemonSets that aren't in the excludes set and the set of covered NodeIDs
func AllDaemonSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]DaemonSet, IntSet) {
	covered := IntSet{}
	views := []DaemonSet{}

	for _, uncastNode := range g.NodesByKind(kubegraph.DaemonSetNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		view, covers := NewDaemonSet(g, uncastNode.(*kubegraph.DaemonSetNode))
		covered.Insert(covers.List()...)
		views = append(views, view)
	}

	return views, covered
}

// NewDaemonSet returns the DaemonSet and a set of all the NodeIDs covered by the DaemonSet
func NewDaemonSet(g osgraph.Graph, node *kubegraph.DaemonSetNode) (DaemonSet, IntSet) {
	covered := IntSet{}
	covered.Insert(node.ID())

	view := DaemonSet{}
	view.DaemonSet = node

	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) {
		podNode := uncastPodNode.(*kubegraph.PodNode)
		covered.Insert(podNode.ID())
		view.OwnedPods = append(view.OwnedPods, podNode)
	}

	for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.TriggersDeploymentEdgeKind) {
		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation))
		covered.Insert(covers.List()...)
		view.Images = append(view.Images, imagePipeline)
	}

	// for each image we use, create an image pipeline and add it to the list
	for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsedges.UsedInDeploymentEdgeKind) {
		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation))

		covered.Insert(covers.List()...)
		view.Images = append(view.Images, imagePipeline)
	}

	return view, covered
}
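
The covered IntSet is what keeps Describe from printing a node twice: each view claims the node IDs it has rendered, and later passes such as AllDaemonSets skip anything already claimed. A toy illustration of that bookkeeping (IntSet here is a bare map, not origin's implementation):

package main

import "fmt"

type IntSet map[int]struct{}

func (s IntSet) Has(id int) bool { _, ok := s[id]; return ok }

func (s IntSet) Insert(ids ...int) {
	for _, id := range ids {
		s[id] = struct{}{}
	}
}

func main() {
	covered := IntSet{}
	allNodes := []int{1, 2, 3, 4}

	// an earlier pass (say, a service group) claims nodes 1 and 2
	covered.Insert(1, 2)

	// a later pass, like AllDaemonSets, only reports what is left
	for _, id := range allNodes {
		if covered.Has(id) {
			continue
		}
		fmt.Println("standalone node:", id)
	}
}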
83 changes: 83 additions & 0 deletions pkg/oc/graph/genericgraph/test/hpa-with-scale-ref.yaml
@@ -0,0 +1,83 @@
apiVersion: v1
items:
- apiVersion: extensions/v1beta1
  kind: Deployment
  metadata:
    annotations:
      deployment.kubernetes.io/revision: "1"
      image.openshift.io/triggers: '[{"from":{"kind":"ImageStreamTag","name":"ruby-deploy:latest"},"fieldPath":"spec.template.spec.containers[?(@.name==\"ruby-deploy\")].image"}]'
      openshift.io/generated-by: OpenShiftNewApp
    generation: 1
    labels:
      app: ruby-deploy
    name: ruby-deploy
  spec:
    progressDeadlineSeconds: 600
    replicas: 1
    revisionHistoryLimit: 10
    selector:
      matchLabels:
        app: ruby-deploy
    strategy:
      rollingUpdate:
        maxSurge: 25%
        maxUnavailable: 25%
      type: RollingUpdate
    template:
      metadata:
        creationTimestamp: null
        labels:
          app: ruby-deploy
      spec:
        containers:
        - image: busybox@sha256:4cee1979ba0bf7db9fc5d28fb7b798ca69ae95a47c5fecf46327720df4ff352d
          imagePullPolicy: IfNotPresent
          name: ruby-deploy
          ports:
          - containerPort: 8080
            protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
  status:
    conditions:
    - lastTransitionTime: 2018-02-24T00:51:47Z
      lastUpdateTime: 2018-02-24T00:51:47Z
      message: Deployment does not have minimum availability.
      reason: MinimumReplicasUnavailable
      status: "False"
      type: Available
    - lastTransitionTime: 2018-02-24T00:51:47Z
      lastUpdateTime: 2018-02-24T00:51:47Z
      message: ReplicaSet "ruby-deploy-599994c49b" is progressing.
      reason: ReplicaSetUpdated
      status: "True"
      type: Progressing
    observedGeneration: 1
    replicas: 1
    unavailableReplicas: 1
    updatedReplicas: 1
- apiVersion: autoscaling/v1
  kind: HorizontalPodAutoscaler
  metadata:
    name: ruby-deploy
  spec:
    maxReplicas: 1
    minReplicas: 1
    scaleTargetRef:
      apiVersion: extensions/v1beta1
      kind: Deployment
      name: ruby-deploy
    targetCPUUtilizationPercentage: 80
  status:
    currentReplicas: 0
    desiredReplicas: 0
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""
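
This fixture pairs the HorizontalPodAutoscaler with a Deployment that is present in the same list, so AddHPAScaleRefEdges resolves the scaleTargetRef to a real node instead of creating a synthetic placeholder; the "hpa non-missing scaleref" test case asserts that deployment/ruby-deploy still renders normally.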
28 changes: 28 additions & 0 deletions pkg/oc/graph/genericgraph/test/rollingupdate-daemonset.yaml
@@ -0,0 +1,28 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: bind
  creationTimestamp: 2016-04-07T04:11:25Z
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 10%
  template:
    metadata:
      labels:
        service: bind
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "service"
                operator: "In"
                values: ["bind"]
            topologyKey: "kubernetes.io/hostname"
            namespaces: []
      containers:
      - name: kubernetes-pause
        image: gcr.io/google-containers/pause:2.0
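
The test clock is frozen at 2016-04-07T04:12:25Z, exactly one minute after this fixture's creationTimestamp, which makes the expected "generation #0 running for about a minute" line deterministic; the generation is 0 because the fixture sets none.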
4 changes: 4 additions & 0 deletions pkg/oc/graph/kubegraph/edges.go
@@ -137,6 +137,8 @@ func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) {
		case *kubegraph.StatefulSetNode:
			// TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments)
			AddManagedByControllerPodEdges(g, cast, cast.StatefulSet.Namespace, cast.StatefulSet.Spec.Selector.MatchLabels)
		case *kubegraph.DaemonSetNode:
			AddManagedByControllerPodEdges(g, cast, cast.DaemonSet.Namespace, cast.DaemonSet.Spec.Selector.MatchLabels)
		}
	}
}
@@ -258,6 +260,8 @@ func AddHPAScaleRefEdges(g osgraph.Graph) {
			syntheticNode = appsgraph.FindOrCreateSyntheticDeploymentConfigNode(g, &appsapi.DeploymentConfig{ObjectMeta: syntheticMeta})
		case r == extensions.Resource("deployments"):
			syntheticNode = kubegraph.FindOrCreateSyntheticDeploymentNode(g, &extensions.Deployment{ObjectMeta: syntheticMeta})
		case r == extensions.Resource("replicasets"):
			syntheticNode = kubegraph.FindOrCreateSyntheticReplicaSetNode(g, &extensions.ReplicaSet{ObjectMeta: syntheticMeta})
		default:
			continue
		}
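
The new replicasets case means an HPA whose scaleTargetRef names a ReplicaSet now gets a target node at all; previously that resource fell through to default and the edge was dropped. The synthetic node reports Found() == false until a loader sees the real object, which is exactly what the new describer guards check. A simplified sketch of the find-or-create idea (the real kubegraph version registers the node on the graph and carries the full object):

type ReplicaSetNode struct {
	Name  string
	found bool // set only once a loader has seen the real object
}

func (n *ReplicaSetNode) Found() bool { return n.found }

// findOrCreateSynthetic returns the node for name, creating a
// not-found placeholder when nothing has registered it yet.
func findOrCreateSynthetic(graph map[string]*ReplicaSetNode, name string) *ReplicaSetNode {
	if n, ok := graph[name]; ok {
		return n
	}
	n := &ReplicaSetNode{Name: name}
	graph[name] = n
	return n
}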