OCPNODE-2596: Add SigstoreImageVerification e2e tests #29530

Open · wants to merge 1 commit into base: master
238 changes: 238 additions & 0 deletions test/extended/imagepolicy/imagepolicy.go
@@ -0,0 +1,238 @@
package imagepolicy

import (
	"context"
	"fmt"
	"path/filepath"
	"time"

	g "github.com/onsi/ginkgo/v2"
	o "github.com/onsi/gomega"
	machineconfighelper "github.com/openshift/origin/test/extended/machine_config"
	exutil "github.com/openshift/origin/test/extended/util"
	kapiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	e2e "k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	admissionapi "k8s.io/pod-security-admission/api"
)

const (
	testReleaseImageScope             = "quay.io/openshift-release-dev/ocp-release@sha256:fbad931c725b2e5b937b295b58345334322bdabb0b67da1c800a53686d7397da"
	testReferenceImageScope           = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4db234f37ae6712e2f7ed8d13f7fb49971c173d0e4f74613d0121672fa2e01f5"
	registriesWorkerPoolMachineConfig = "99-worker-generated-registries"
	registriesMasterPoolMachineConfig = "99-master-generated-registries"
	testPodName                       = "signature-validation-test-pod"
	workerPool                        = "worker"
	masterPool                        = "master"
	SignatureValidationFailedReason   = "SignatureValidationFailed"
)

var _ = g.Describe("[sig-imagepolicy][OCPFeatureGate:SigstoreImageVerification][Serial]", g.Ordered, func() {
	defer g.GinkgoRecover()
	var (
		oc                 = exutil.NewCLIWithoutNamespace("cluster-image-policy")
		tctx               = context.Background()
		cli                = exutil.NewCLIWithPodSecurityLevel("verifysigstore-e2e", admissionapi.LevelBaseline)
		clif               = cli.KubeFramework()
		imgpolicyCli       = exutil.NewCLIWithPodSecurityLevel("verifysigstore-imagepolicy-e2e", admissionapi.LevelBaseline)
		imgpolicyClif      = imgpolicyCli.KubeFramework()
		imagePolicyBaseDir = exutil.FixturePath("testdata", "imagepolicy")

		invalidPublicKeyClusterImagePolicyFixture = filepath.Join(imagePolicyBaseDir, "invalid-public-key-cluster-image-policy.yaml")
		publicKeyRekorClusterImagePolicyFixture   = filepath.Join(imagePolicyBaseDir, "public-key-rekor-cluster-image-policy.yaml")
		invalidPublicKeyImagePolicyFixture        = filepath.Join(imagePolicyBaseDir, "invalid-public-key-image-policy.yaml")
		publicKeyRekorImagePolicyFixture          = filepath.Join(imagePolicyBaseDir, "public-key-rekor-image-policy.yaml")
	)
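Note on the fixtures referenced above: the YAML files themselves are not part of this diff. As an illustration only, a public-key-plus-Rekor ClusterImagePolicy fixture would plausibly look like the sketch below, written against the v1alpha1 TechPreview API; the name, scope, and key data are placeholders, not the PR's actual fixture contents.

apiVersion: config.openshift.io/v1alpha1
kind: ClusterImagePolicy
metadata:
  name: public-key-rekor-cluster-image-policy   # placeholder name
spec:
  scopes:
    - quay.io/openshift-release-dev/ocp-release  # registry scope the policy applies to
  policy:
    rootOfTrust:
      policyType: PublicKey
      publicKey:
        keyData: <base64-encoded cosign public key>      # placeholder
        rekorKeyData: <base64-encoded Rekor public key>  # placeholder
    signedIdentity:
      matchPolicy: MatchRepoDigestOrExact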

	g.BeforeAll(func() {
		if !exutil.IsTechPreviewNoUpgrade(tctx, oc.AdminConfigClient()) {
			g.Skip("skipping, this feature is only supported on TechPreviewNoUpgrade clusters")
		}

		outStr, err := oc.Run("adm", "release", "info", testReleaseImageScope).Args("-o=go-template", "--template={{.digest}}").Output()
		if err != nil || outStr == "" {
			o.Expect(err).ToNot(o.HaveOccurred())
			e2eskipper.Skipf("can't validate %s release image for testing, consider updating the test", testReleaseImageScope)
		}
	})

g.It("Should fail clusterimagepolicy signature validation root of trust does not match the identity in the signature", func() {
createClusterImagePolicy(oc, invalidPublicKeyClusterImagePolicyFixture)
g.DeferCleanup(deleteClusterImagePolicy, oc, invalidPublicKeyClusterImagePolicyFixture)

pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
o.Expect(err).NotTo(o.HaveOccurred())
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
o.Expect(err).NotTo(o.HaveOccurred())
})

g.It("Should fail clusterimagepolicy signature validation when scope in allowedRegistries list does not skip signature verification", func() {
// Ensure allowedRegistries do not skip signature verification by adding testReleaseImageScope to the list
allowedRegistries := []string{"quay.io", "registry.redhat.io", "image-registry.openshift-image-registry.svc:5000", testReleaseImageScope}
updateImageConfig(oc, allowedRegistries)
g.DeferCleanup(cleanupImageConfig, oc)

createClusterImagePolicy(oc, invalidPublicKeyClusterImagePolicyFixture)
g.DeferCleanup(deleteClusterImagePolicy, oc, invalidPublicKeyClusterImagePolicyFixture)

pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
o.Expect(err).NotTo(o.HaveOccurred())
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
o.Expect(err).NotTo(o.HaveOccurred())
})

g.It("Should pass clusterimagepolicy signature validation with signed image", func() {
createClusterImagePolicy(oc, publiKeyRekorClusterImagePolicyFixture)
g.DeferCleanup(deleteClusterImagePolicy, oc, publiKeyRekorClusterImagePolicyFixture)

pod, err := launchTestPod(tctx, clif, testPodName, testReleaseImageScope)
o.Expect(err).NotTo(o.HaveOccurred())
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

err = e2epod.WaitForPodSuccessInNamespace(tctx, clif.ClientSet, pod.Name, pod.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
})

g.It("Should fail imagepolicy signature validation in different namespaces root of trust does not match the identity in the signature", func() {
createImagePolicy(oc, invalidPublicKeyImagePolicyFixture, imgpolicyClif.Namespace.Name)
g.DeferCleanup(deleteImagePolicy, oc, invalidPublicKeyImagePolicyFixture, imgpolicyClif.Namespace.Name)

createImagePolicy(oc, invalidPublicKeyImagePolicyFixture, clif.Namespace.Name)
g.DeferCleanup(deleteImagePolicy, oc, invalidPublicKeyImagePolicyFixture, clif.Namespace.Name)

pod, err := launchTestPod(tctx, imgpolicyClif, testPodName, testReferenceImageScope)
o.Expect(err).NotTo(o.HaveOccurred())
g.DeferCleanup(deleteTestPod, tctx, imgpolicyClif, testPodName)

err = waitForTestPodContainerToFailSignatureValidation(tctx, imgpolicyClif, pod)
o.Expect(err).NotTo(o.HaveOccurred())

pod, err = launchTestPod(tctx, clif, testPodName, testReferenceImageScope)
o.Expect(err).NotTo(o.HaveOccurred())
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

err = waitForTestPodContainerToFailSignatureValidation(tctx, clif, pod)
o.Expect(err).NotTo(o.HaveOccurred())
})

g.It("Should pass imagepolicy signature validation with signed image in namespaces", func() {
createImagePolicy(oc, publiKeyRekorImagePolicyFixture, clif.Namespace.Name)
g.DeferCleanup(deleteImagePolicy, oc, publiKeyRekorImagePolicyFixture, clif.Namespace.Name)

createImagePolicy(oc, publiKeyRekorImagePolicyFixture, imgpolicyClif.Namespace.Name)
g.DeferCleanup(deleteImagePolicy, oc, publiKeyRekorImagePolicyFixture, imgpolicyClif.Namespace.Name)

pod, err := launchTestPod(tctx, clif, testPodName, testReferenceImageScope)
o.Expect(err).NotTo(o.HaveOccurred())
g.DeferCleanup(deleteTestPod, tctx, clif, testPodName)

err = e2epod.WaitForPodSuccessInNamespace(tctx, clif.ClientSet, pod.Name, pod.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())

pod, err = launchTestPod(tctx, imgpolicyClif, testPodName, testReferenceImageScope)
o.Expect(err).NotTo(o.HaveOccurred())
g.DeferCleanup(deleteTestPod, tctx, imgpolicyClif, testPodName)

err = e2epod.WaitForPodSuccessInNamespace(tctx, imgpolicyClif.ClientSet, pod.Name, pod.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
})
})

func createClusterImagePolicy(oc *exutil.CLI, fixture string) {
	err := oc.Run("create").Args("-f", fixture).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Give the MachineConfigPool status time to transition to Updating after the CR is created.
	time.Sleep(10 * time.Second)
[Review thread on the time.Sleep call]
Member: For what do we need the sleep here and below?
Member (author): This sleep waits for the MachineConfigPool status to change to Updating. There is a delay in the status change after the CR is created.
Contributor (@djoshy, Feb 13, 2025): That's interesting! We don't need this delay in the MCO e2es, but perhaps the oc library is faster/slower for some reason.
	machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
}
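On the thread above: a condition-based wait could replace the guessed 10s delay. The following is a minimal sketch, not part of the PR; it assumes the machineconfigclient and mcfgv1 packages already imported by the machine_config helpers below, and that machineconfighelper exports IsMachineConfigPoolConditionTrue. The helper name is hypothetical.

// Hypothetical alternative to the fixed sleep: poll the pool until it
// actually reports Updating, then hand off to WaitForConfigAndPoolComplete.
func waitForPoolUpdating(oc *exutil.CLI, pool string) {
	client, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Eventually(func() bool {
		mcp, err := client.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{})
		if err != nil {
			return false
		}
		// True once the pool has begun rolling out the new rendered config.
		return machineconfighelper.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdating)
	}, 2*time.Minute, 5*time.Second).Should(o.BeTrue())
}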

func deleteClusterImagePolicy(oc *exutil.CLI, fixture string) error {
	return oc.Run("delete").Args("-f", fixture).Execute()
}

func createImagePolicy(oc *exutil.CLI, fixture string, namespace string) {
	err := oc.Run("create").Args("-f", fixture, "-n", namespace).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	// Give the MachineConfigPool status time to transition to Updating after the CR is created.
	time.Sleep(10 * time.Second)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
}

func deleteImagePolicy(oc *exutil.CLI, fixture string, namespace string) error {
	return oc.Run("delete").Args("-f", fixture, "-n", namespace).Execute()
}

func updateImageConfig(oc *exutil.CLI, allowedRegistries []string) {
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		imageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(
			context.Background(), "cluster", metav1.GetOptions{},
		)
		if err != nil {
			return err
		}
		imageConfig.Spec.RegistrySources.AllowedRegistries = allowedRegistries
		_, err = oc.AdminConfigClient().ConfigV1().Images().Update(
			context.Background(), imageConfig, metav1.UpdateOptions{},
		)
		return err
	})
	o.Expect(err).NotTo(o.HaveOccurred(), "error updating image config")
	// Give the MachineConfigPool status time to transition to Updating after the image config changes.
	time.Sleep(10 * time.Second)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, workerPool, registriesWorkerPoolMachineConfig)
	machineconfighelper.WaitForConfigAndPoolComplete(oc, masterPool, registriesMasterPoolMachineConfig)
}

func cleanupImageConfig(oc *exutil.CLI) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		imageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(
			context.Background(), "cluster", metav1.GetOptions{},
		)
		if err != nil {
			return err
		}
		imageConfig.Spec.RegistrySources.AllowedRegistries = []string{}
		_, err = oc.AdminConfigClient().ConfigV1().Images().Update(
			context.Background(), imageConfig, metav1.UpdateOptions{},
		)
		return err
	})
}

func launchTestPod(ctx context.Context, f *e2e.Framework, podName, image string) (*kapiv1.Pod, error) {
	g.By(fmt.Sprintf("launching the pod: %s", podName))
	contName := fmt.Sprintf("%s-container", podName)
	pod := &kapiv1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: kapiv1.PodSpec{
			Containers: []kapiv1.Container{
				{
					Name:            contName,
					Image:           image,
					ImagePullPolicy: kapiv1.PullAlways,
				},
			},
			RestartPolicy: kapiv1.RestartPolicyNever,
		},
	}
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
	return pod, err
}

func deleteTestPod(ctx context.Context, f *e2e.Framework, podName string) error {
	return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
}

func waitForTestPodContainerToFailSignatureValidation(ctx context.Context, f *e2e.Framework, pod *kapiv1.Pod) error {
	return e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, pod.Namespace, pod.Name, 0, SignatureValidationFailedReason, e2e.PodStartShortTimeout)
}
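For context on what the helper above is waiting for: the kubelet surfaces the signature rejection as a container waiting reason. A rough standalone equivalent of the per-pod check, sketched here for illustration (the e2epod helper additionally targets a specific container index and applies a timeout):

// Sketch only, not the helper's actual implementation: true once any
// container in the pod is stuck waiting with the signature-validation
// failure reason.
func containerFailedSignatureValidation(pod *kapiv1.Pod) bool {
	for _, cs := range pod.Status.ContainerStatuses {
		if cs.State.Waiting != nil && cs.State.Waiting.Reason == SignatureValidationFailedReason {
			return true
		}
	}
	return false
}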
1 change: 1 addition & 0 deletions test/extended/include.go
@@ -32,6 +32,7 @@ import (
_ "github.com/openshift/origin/test/extended/idling"
_ "github.com/openshift/origin/test/extended/image_ecosystem"
_ "github.com/openshift/origin/test/extended/imageapis"
_ "github.com/openshift/origin/test/extended/imagepolicy"
_ "github.com/openshift/origin/test/extended/images"
_ "github.com/openshift/origin/test/extended/images/trigger"
_ "github.com/openshift/origin/test/extended/kernel"
79 changes: 79 additions & 0 deletions test/extended/machine_config/helpers.go
@@ -292,3 +292,82 @@ func WaitForOneMasterNodeToBeReady(oc *exutil.CLI) error {
	}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
	return nil
}

// WaitForConfigAndPoolComplete is a helper function that gets a renderedConfig and waits for its pool to complete.
// The return value is the final rendered config.
func WaitForConfigAndPoolComplete(oc *exutil.CLI, pool, mcName string) string {
	config, err := WaitForRenderedConfig(oc, pool, mcName)
	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v: failed to render machine config %s from pool %s", err, mcName, pool))

	err = WaitForPoolComplete(oc, pool, config)
	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v: pool %s did not update to config %s", err, pool, config))
	return config
}

// WaitForRenderedConfig polls a MachineConfigPool until it has
// included the given mcName in its config, and returns the new
// rendered config name.
func WaitForRenderedConfig(oc *exutil.CLI, pool, mcName string) (string, error) {
	return WaitForRenderedConfigs(oc, pool, mcName)
}
[Review thread on WaitForRenderedConfig]
Reviewer: Maybe I'm not familiar with the logic here, but since the mcName parameter of WaitForRenderedConfig is a plain string, why introduce a new WaitForRenderedConfigs that takes it as ...string? You can keep both; it is just confusing.
Member (author): I borrowed this function from the MCO e2e test package; I guess there might be callers that use it directly with multiple mcNames. I would also let folks from MCO review.
Reviewer: Got it, that's okay.
Contributor: Thanks for catching this! I think it is something that was missed when WaitForRenderedConfigs was first added; we'll try to clean it up in the MCO repo.
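To make the wrapper relationship concrete, hypothetical call sites for both forms (the second MachineConfig name is invented for illustration):

// Single MachineConfig: the string-typed wrapper forwards to the variadic helper.
rendered, err := WaitForRenderedConfig(oc, "worker", "99-worker-generated-registries")

// Several MachineConfigs at once: the pool's rendered config must include all names.
rendered, err = WaitForRenderedConfigs(oc, "worker",
	"99-worker-generated-registries",
	"99-worker-extra-config", // hypothetical second MC
)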

// WaitForRenderedConfigs polls a MachineConfigPool until it has
// included the given mcNames in its config, and returns the new
// rendered config name.
func WaitForRenderedConfigs(oc *exutil.CLI, pool string, mcNames ...string) (string, error) {
	var renderedConfig string
	machineConfigClient, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
	o.Expect(err).NotTo(o.HaveOccurred())
	found := make(map[string]bool)
	o.Eventually(func() bool {
		// Set up the list
		for _, name := range mcNames {
			found[name] = false
		}

		// Update found based on the MCP
		mcp, err := machineConfigClient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{})
		if err != nil {
			return false
		}
		for _, mc := range mcp.Spec.Configuration.Source {
			if _, ok := found[mc.Name]; ok {
				found[mc.Name] = true
			}
		}

		// If any are still false, then they weren't included in the MCP
		for _, nameFound := range found {
			if !nameFound {
				return false
			}
		}

		// All the required names were found
		renderedConfig = mcp.Spec.Configuration.Name
		return true
	}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
	return renderedConfig, nil
}

// WaitForPoolComplete polls a pool until it has completed an update to target
func WaitForPoolComplete(oc *exutil.CLI, pool, target string) error {
	machineConfigClient, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
	o.Expect(err).NotTo(o.HaveOccurred())
	framework.Logf("Waiting for pool %s to complete %s", pool, target)
	o.Eventually(func() bool {
		mcp, err := machineConfigClient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Failed to grab machineconfigpools, error: %v", err)
			return false
		}
		if mcp.Status.Configuration.Name != target {
			return false
		}
		if IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) {
			return true
		}
		return false
	}, 20*time.Minute, 10*time.Second).Should(o.BeTrue())
	return nil
}