-
Notifications
You must be signed in to change notification settings - Fork 4.7k
/
Copy pathservice.go
85 lines (71 loc) · 2.96 KB
/
service.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
package builds
import (
"context"
"fmt"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
kapierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kdeployutil "k8s.io/kubernetes/test/e2e/framework/deployment"
admissionapi "k8s.io/pod-security-admission/api"
exutil "github.com/openshift/origin/test/extended/util"
"github.com/openshift/origin/test/extended/util/image"
)
var _ = g.Describe("[sig-builds][Feature:Builds] build can reference a cluster service", func() {
	defer g.GinkgoRecover()

	var (
		// CLI fixture scoped to this suite; baseline pod security is sufficient for builds.
		oc = exutil.NewCLIWithPodSecurityLevel("build-service", admissionapi.LevelBaseline)
		// Dockerfile for the second build: it inspects DNS config and then
		// curls the hello-nodejs service by its cluster DNS name on port 8080.
		testDockerfile = fmt.Sprintf(`
FROM %s
RUN cat /etc/resolv.conf
RUN curl -vvv hello-nodejs:8080
`, image.ShellImage())
	)

	g.Context("", func() {
		g.BeforeEach(func() {
			exutil.PreTestDump()
		})

		g.AfterEach(func() {
			// Dump cluster state only when the spec failed, to aid debugging.
			if !g.CurrentSpecReport().Failed() {
				return
			}
			exutil.DumpPodStates(oc)
			exutil.DumpConfigMapStates(oc)
			exutil.DumpPodLogsStartingWith("", oc)
		})

		g.Describe("with a build being created from new-build", func() {
			g.It("should be able to run a build that references a cluster service [apigroup:build.openshift.io]", func() {
				g.By("standing up a new hello world nodejs service via oc new-app")
				newAppErr := oc.Run("new-app").Args("registry.redhat.io/ubi8/nodejs-16:latest~https://github.com/sclorg/nodejs-ex.git", "--name", "hello-nodejs").Execute()
				o.Expect(newAppErr).NotTo(o.HaveOccurred())

				// Wait for the service's build; dump its logs on failure before asserting.
				buildErr := exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "hello-nodejs-1", nil, nil, nil)
				if buildErr != nil {
					exutil.DumpBuildLogs("hello-nodejs", oc)
				}
				o.Expect(buildErr).NotTo(o.HaveOccurred())

				deployment, getErr := oc.KubeClient().AppsV1().Deployments(oc.Namespace()).Get(context.Background(), "hello-nodejs", metav1.GetOptions{})
				switch {
				case kapierrs.IsNotFound(getErr):
					// No Deployment found: an older new-app produced a DeploymentConfig instead.
					dcErr := exutil.WaitForDeploymentConfig(oc.KubeClient(), oc.AppsClient().AppsV1(), oc.Namespace(), "hello-nodejs", 1, true, oc)
					o.Expect(dcErr).NotTo(o.HaveOccurred())
				default:
					// Deployment is present: wait for the rollout to complete.
					o.Expect(getErr).NotTo(o.HaveOccurred())
					o.Expect(kdeployutil.WaitForDeploymentComplete(oc.KubeClient(), deployment)).NotTo(o.HaveOccurred())
				}

				// Make sure the service has ready endpoints before the build tries to curl it.
				o.Expect(exutil.WaitForEndpoint(oc.KubeFramework().ClientSet, oc.Namespace(), "hello-nodejs")).NotTo(o.HaveOccurred())

				g.By("calling oc new-build with a Dockerfile")
				o.Expect(oc.Run("new-build").Args("-D", "-", "--to", "test:latest").InputString(testDockerfile).Execute()).NotTo(o.HaveOccurred())

				g.By("expecting the build is in Complete phase")
				curlBuildErr := exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "test-1", nil, nil, nil)
				// Debug aid for failures.
				if curlBuildErr != nil {
					exutil.DumpBuildLogs("test", oc)
				}
				o.Expect(curlBuildErr).NotTo(o.HaveOccurred())
			})
		})
	})
})