diff --git a/contrib/completions/bash/oc b/contrib/completions/bash/oc index 71a8edf2b139..5c2e8f55e032 100644 --- a/contrib/completions/bash/oc +++ b/contrib/completions/bash/oc @@ -11823,6 +11823,59 @@ _oc_image_append() noun_aliases=() } +_oc_image_extract() +{ + last_command="oc_image_extract" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--dry-run") + local_nonpersistent_flags+=("--dry-run") + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--max-per-registry=") + local_nonpersistent_flags+=("--max-per-registry=") + flags+=("--only-files") + local_nonpersistent_flags+=("--only-files") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--cache-dir=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--config=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _oc_image_mirror() { last_command="oc_image_mirror" @@ -11890,6 +11943,7 @@ _oc_image() last_command="oc_image" commands=() commands+=("append") + commands+=("extract") commands+=("mirror") flags=() diff --git a/contrib/completions/zsh/oc b/contrib/completions/zsh/oc index 26c16b8bc396..e0d64003d4fc 100644 --- a/contrib/completions/zsh/oc +++ 
b/contrib/completions/zsh/oc @@ -11965,6 +11965,59 @@ _oc_image_append() noun_aliases=() } +_oc_image_extract() +{ + last_command="oc_image_extract" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--dry-run") + local_nonpersistent_flags+=("--dry-run") + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--max-per-registry=") + local_nonpersistent_flags+=("--max-per-registry=") + flags+=("--only-files") + local_nonpersistent_flags+=("--only-files") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--cache-dir=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--config=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _oc_image_mirror() { last_command="oc_image_mirror" @@ -12032,6 +12085,7 @@ _oc_image() last_command="oc_image" commands=() commands+=("append") + commands+=("extract") commands+=("mirror") flags=() diff --git a/docs/man/man1/.files_generated_oc b/docs/man/man1/.files_generated_oc index 9f8fc5a3d9af..665aa26d122a 100644 --- a/docs/man/man1/.files_generated_oc +++ b/docs/man/man1/.files_generated_oc @@ -230,6 +230,7 @@ oc-extract.1 oc-get.1 oc-idle.1 oc-image-append.1 +oc-image-extract.1 oc-image-mirror.1 
oc-image.1 oc-import-app.json.1 diff --git a/docs/man/man1/oc-image-extract.1 b/docs/man/man1/oc-image-extract.1 new file mode 100644 index 000000000000..b6fd7a0f9896 --- /dev/null +++ b/docs/man/man1/oc-image-extract.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/pkg/oc/cli/image/append/append.go b/pkg/oc/cli/image/append/append.go index 0988b2ebc979..0ecd92d4f692 100644 --- a/pkg/oc/cli/image/append/append.go +++ b/pkg/oc/cli/image/append/append.go @@ -9,8 +9,6 @@ import ( "io/ioutil" "net/http" "os" - "regexp" - "runtime" "strconv" "time" @@ -20,8 +18,6 @@ import ( "github.com/docker/distribution" distributioncontext "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client" @@ -38,6 +34,8 @@ import ( "github.com/openshift/origin/pkg/image/dockerlayer/add" "github.com/openshift/origin/pkg/image/registryclient" "github.com/openshift/origin/pkg/image/registryclient/dockercredentials" + imagemanifest "github.com/openshift/origin/pkg/oc/cli/image/manifest" + "github.com/openshift/origin/pkg/oc/cli/image/workqueue" ) var ( @@ -86,10 +84,7 @@ type AppendImageOptions struct { DropHistory bool CreatedAt string - OSFilter *regexp.Regexp - DefaultOSFilter bool - - FilterByOS string + FilterOptions imagemanifest.FilterOptions MaxPerRegistry int @@ -100,12 +95,6 @@ type AppendImageOptions struct { genericclioptions.IOStreams } -// schema2ManifestOnly specifically requests a manifest list first -var 
schema2ManifestOnly = distribution.WithManifestMediaTypes([]string{ - manifestlist.MediaTypeManifestList, - schema2.MediaTypeManifest, -}) - func NewAppendImageOptions(streams genericclioptions.IOStreams) *AppendImageOptions { return &AppendImageOptions{ IOStreams: streams, @@ -129,9 +118,10 @@ func NewCmdAppendImage(name string, streams genericclioptions.IOStreams) *cobra. } flag := cmd.Flags() + o.FilterOptions.Bind(flag) + flag.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Print the actions that would be taken and exit without writing to the destination.") flag.BoolVar(&o.Insecure, "insecure", o.Insecure, "Allow push and pull operations to registries to be made over HTTP") - flag.StringVar(&o.FilterByOS, "filter-by-os", o.FilterByOS, "A regular expression to control which images are mirrored. Images will be passed as '/[/]'.") flag.StringVar(&o.From, "from", o.From, "The image to use as a base. If empty, a new scratch image is created.") flag.StringVar(&o.To, "to", o.To, "The Docker repository tag to upload the appended image to.") @@ -148,17 +138,8 @@ func NewCmdAppendImage(name string, streams genericclioptions.IOStreams) *cobra. } func (o *AppendImageOptions) Complete(cmd *cobra.Command, args []string) error { - pattern := o.FilterByOS - if len(pattern) == 0 && !cmd.Flags().Changed("filter-by-os") { - o.DefaultOSFilter = true - pattern = regexp.QuoteMeta(fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)) - } - if len(pattern) > 0 { - re, err := regexp.Compile(pattern) - if err != nil { - return fmt.Errorf("--filter-by-os was not a valid regular expression: %v", err) - } - o.OSFilter = re + if err := o.FilterOptions.Complete(cmd.Flags()); err != nil { + return err } for _, arg := range args { @@ -175,20 +156,6 @@ func (o *AppendImageOptions) Complete(cmd *cobra.Command, args []string) error { return nil } -// includeDescriptor returns true if the provided manifest should be included. 
-func (o *AppendImageOptions) includeDescriptor(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool { - if o.OSFilter == nil { - return true - } - if o.DefaultOSFilter && !hasMultiple { - return true - } - if len(d.Platform.Variant) > 0 { - return o.OSFilter.MatchString(fmt.Sprintf("%s/%s/%s", d.Platform.OS, d.Platform.Architecture, d.Platform.Variant)) - } - return o.OSFilter.MatchString(fmt.Sprintf("%s/%s", d.Platform.OS, d.Platform.Architecture)) -} - func (o *AppendImageOptions) Run() error { var createdAt *time.Time if len(o.CreatedAt) > 0 { @@ -256,97 +223,16 @@ func (o *AppendImageOptions) Run() error { return err } fromRepo = repo - var srcDigest digest.Digest - if len(from.Tag) > 0 { - desc, err := repo.Tags(ctx).Get(ctx, from.Tag) - if err != nil { - return err - } - srcDigest = desc.Digest - } else { - srcDigest = digest.Digest(from.ID) - } - manifests, err := repo.Manifests(ctx) - if err != nil { - return err - } - srcManifest, err := manifests.Get(ctx, srcDigest, schema2ManifestOnly) - if err != nil { - return err - } - originalSrcDigest := srcDigest - srcManifests, srcManifest, srcDigest, err := processManifestList(ctx, srcDigest, srcManifest, manifests, *from, o.includeDescriptor) + srcManifest, _, location, err := imagemanifest.FirstManifest(ctx, *from, repo, o.FilterOptions.Include) if err != nil { - return err + return fmt.Errorf("unable to read image %s: %v", from, err) } - if len(srcManifests) == 0 { - return fmt.Errorf("filtered all images from %s", from) - } - - var location string - if srcDigest == originalSrcDigest { - location = fmt.Sprintf("manifest %s", srcDigest) - } else { - location = fmt.Sprintf("manifest %s in manifest list %s", srcDigest, originalSrcDigest) + base, layers, err = imagemanifest.ManifestToImageConfig(ctx, srcManifest, repo.Blobs(ctx), location) + if err != nil { + return fmt.Errorf("unable to parse image %s: %v", from, err) } - switch t := srcManifest.(type) { - case *schema2.DeserializedManifest: - if 
t.Config.MediaType != schema2.MediaTypeImageConfig { - return fmt.Errorf("unable to append layers to images with config %s from %s", t.Config.MediaType, location) - } - configJSON, err := repo.Blobs(ctx).Get(ctx, t.Config.Digest) - if err != nil { - return fmt.Errorf("unable to find manifest for image %s: %v", *from, err) - } - glog.V(4).Infof("Raw image config json:\n%s", string(configJSON)) - config := &docker10.DockerImageConfig{} - if err := json.Unmarshal(configJSON, &config); err != nil { - return fmt.Errorf("the source image manifest could not be parsed: %v", err) - } - - base = config - layers = t.Layers - base.Size = 0 - for _, layer := range t.Layers { - base.Size += layer.Size - } - - case *schema1.SignedManifest: - if glog.V(4) { - _, configJSON, _ := srcManifest.Payload() - glog.Infof("Raw image config json:\n%s", string(configJSON)) - } - if len(t.History) == 0 { - return fmt.Errorf("input image is in an unknown format: no v1Compatibility history") - } - config := &docker10.DockerV1CompatibilityImage{} - if err := json.Unmarshal([]byte(t.History[0].V1Compatibility), &config); err != nil { - return err - } - - base = &docker10.DockerImageConfig{} - if err := docker10.Convert_DockerV1CompatibilityImage_to_DockerImageConfig(config, base); err != nil { - return err - } - - // schema1 layers are in reverse order - layers = make([]distribution.Descriptor, 0, len(t.FSLayers)) - for i := len(t.FSLayers) - 1; i >= 0; i-- { - layer := distribution.Descriptor{ - MediaType: schema2.MediaTypeLayer, - Digest: t.FSLayers[i].BlobSum, - // size must be reconstructed from the blobs - } - // we must reconstruct the tar sum from the blobs - add.AddLayerToConfig(base, layer, "") - layers = append(layers, layer) - } - - default: - return fmt.Errorf("unable to append layers to images of type %T from %s", srcManifest, location) - } } else { base = add.NewEmptyConfig() layers = []distribution.Descriptor{add.AddScratchLayerToConfig(base)} @@ -445,8 +331,8 @@ func (o 
*AppendImageOptions) Run() error { // upload base layers in parallel stopCh := make(chan struct{}) defer close(stopCh) - q := newWorkQueue(o.MaxPerRegistry, stopCh) - err = q.Try(func(w Try) { + q := workqueue.New(o.MaxPerRegistry, stopCh) + err = q.Try(func(w workqueue.Try) { for i := range layers[:numLayers] { layer := &layers[i] index := i @@ -551,7 +437,7 @@ func (o *AppendImageOptions) Run() error { if err != nil { return fmt.Errorf("unable to upload the new image manifest: %v", err) } - toDigest, err := putManifestInCompatibleSchema(ctx, manifest, to.Tag, toManifests, fromRepo.Blobs(ctx), toRepo.Named()) + toDigest, err := imagemanifest.PutManifestInCompatibleSchema(ctx, manifest, to.Tag, toManifests, fromRepo.Blobs(ctx), toRepo.Named()) if err != nil { return fmt.Errorf("unable to convert the image to a compatible schema version: %v", err) } diff --git a/pkg/oc/cli/image/append/manifest.go b/pkg/oc/cli/image/append/manifest.go deleted file mode 100644 index 461f61193f72..000000000000 --- a/pkg/oc/cli/image/append/manifest.go +++ /dev/null @@ -1,178 +0,0 @@ -package append - -import ( - "context" - "fmt" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - - "github.com/docker/libtrust" - "github.com/golang/glog" - digest "github.com/opencontainers/go-digest" - - imagereference "github.com/openshift/origin/pkg/image/apis/image/reference" -) - -func processManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref 
imagereference.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor, bool) bool) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) { - var srcManifests []distribution.Manifest - switch t := srcManifest.(type) { - case *manifestlist.DeserializedManifestList: - manifestDigest := srcDigest - manifestList := t - - filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests)) - for _, manifest := range t.Manifests { - if !filterFn(&manifest, len(t.Manifests) > 1) { - glog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref) - continue - } - glog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref) - filtered = append(filtered, manifest) - } - - if len(filtered) == 0 { - return nil, nil, "", nil - } - - // if we're filtering the manifest list, update the source manifest and digest - if len(filtered) != len(t.Manifests) { - var err error - t, err = manifestlist.FromDescriptors(filtered) - if err != nil { - return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err) - } - _, body, err := t.Payload() - if err != nil { - return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err) - } - manifestList = t - manifestDigest = srcDigest.Algorithm().FromBytes(body) - glog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body) - } - - for i, manifest := range t.Manifests { - childManifest, err := manifests.Get(ctx, manifest.Digest, distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest})) - if err != nil { - return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err) - } - srcManifests = append(srcManifests, childManifest) - } - - switch { - case len(srcManifests) == 1: - _, body, err := srcManifests[0].Payload() - if err != nil { - return nil, nil, "", fmt.Errorf("unable to convert 
source image %s manifest list to single manifest: %v", ref, err) - } - manifestDigest := srcDigest.Algorithm().FromBytes(body) - glog.V(5).Infof("Used only one manifest from the list %s", manifestDigest) - return srcManifests, srcManifests[0], manifestDigest, nil - default: - return append(srcManifests, manifestList), manifestList, manifestDigest, nil - } - - default: - return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil - } -} - -// TDOO: remove when quay.io switches to v2 schema -func putManifestInCompatibleSchema( - ctx context.Context, - srcManifest distribution.Manifest, - tag string, - toManifests distribution.ManifestService, - // supports schema2 -> schema1 downconversion - blobs distribution.BlobService, - ref reference.Named, -) (digest.Digest, error) { - var options []distribution.ManifestServiceOption - if len(tag) > 0 { - glog.V(5).Infof("Put manifest %s:%s", ref, tag) - options = []distribution.ManifestServiceOption{distribution.WithTag(tag)} - } else { - glog.V(5).Infof("Put manifest %s", ref) - } - toDigest, err := toManifests.Put(ctx, srcManifest, options...) 
- if err == nil { - return toDigest, nil - } - errs, ok := err.(errcode.Errors) - if !ok || len(errs) == 0 { - return toDigest, err - } - errcode, ok := errs[0].(errcode.Error) - if !ok || errcode.ErrorCode() != v2.ErrorCodeManifestInvalid { - return toDigest, err - } - // try downconverting to v2-schema1 - schema2Manifest, ok := srcManifest.(*schema2.DeserializedManifest) - if !ok { - return toDigest, err - } - tagRef, tagErr := reference.WithTag(ref, tag) - if tagErr != nil { - return toDigest, err - } - glog.V(5).Infof("Registry reported invalid manifest error, attempting to convert to v2schema1 as ref %s", tagRef) - schema1Manifest, convertErr := convertToSchema1(ctx, blobs, schema2Manifest, tagRef) - if convertErr != nil { - return toDigest, err - } - if glog.V(6) { - _, data, _ := schema1Manifest.Payload() - glog.Infof("Converted to v2schema1\n%s", string(data)) - } - return toManifests.Put(ctx, schema1Manifest, distribution.WithTag(tag)) -} - -// TDOO: remove when quay.io switches to v2 schema -func convertToSchema1(ctx context.Context, blobs distribution.BlobService, schema2Manifest *schema2.DeserializedManifest, ref reference.Named) (distribution.Manifest, error) { - targetDescriptor := schema2Manifest.Target() - configJSON, err := blobs.Get(ctx, targetDescriptor.Digest) - if err != nil { - return nil, err - } - trustKey, err := loadPrivateKey() - if err != nil { - return nil, err - } - builder := schema1.NewConfigManifestBuilder(blobs, trustKey, ref, configJSON) - for _, d := range schema2Manifest.Layers { - if err := builder.AppendReference(d); err != nil { - return nil, err - } - } - manifest, err := builder.Build(ctx) - if err != nil { - return nil, err - } - return manifest, nil -} - -var ( - privateKeyLock sync.Mutex - privateKey libtrust.PrivateKey -) - -// TDOO: remove when quay.io switches to v2 schema -func loadPrivateKey() (libtrust.PrivateKey, error) { - privateKeyLock.Lock() - defer privateKeyLock.Unlock() - if privateKey != nil { - return 
privateKey, nil - } - trustKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err - } - privateKey = trustKey - return privateKey, nil -} diff --git a/pkg/oc/cli/image/archive/archive.go b/pkg/oc/cli/image/archive/archive.go new file mode 100644 index 000000000000..2f7b8b06997e --- /dev/null +++ b/pkg/oc/cli/image/archive/archive.go @@ -0,0 +1,438 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + // REMOVED: use remap instead + //UIDMaps []idtools.IDMap + //GIDMaps []idtools.IDMap + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. 
+ RebaseNames map[string]string + InUserNS bool + + // ADDED: allow bypassing chown + // If false, no chown will be performed + Chown bool + + AlterHeaders AlterHeader + } +) + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type AlterHeader interface { + Alter(*tar.Header) (bool, error) +} + +type RemapIDs struct { + mappings *idtools.IDMappings +} + +func (r RemapIDs) Alter(hdr *tar.Header) (bool, error) { + ids, err := r.mappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return true, err +} + +// ApplyLayer is copied from github.com/docker/docker/pkg/archive +func ApplyLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + dest = filepath.Clean(dest) + var err error + layer, err = archive.DecompressStream(layer) + if err != nil { + return 0, err + } + return unpackLayer(dest, layer, options) +} + +// unpackLayer is copied from github.com/docker/docker/pkg/archive +// unpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func unpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{Chown: true} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + // idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + if options.AlterHeaders != nil { + ok, err := options.AlterHeaders.Alter(hdr) + if err != nil { + return 0, err + } + if !ok { + continue + } + } + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+ if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600, "") + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, archive.WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, archive.WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options.Chown, options.ChownOpts, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != archive.WhiteoutOpaqueDir { + continue + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. 
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == archive.WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(archive.WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). 
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), archive.WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + // if err := remapIDs(idMappings, srcHdr); err != nil { + // return 0, err + // } + + if err := createTarFile(path, dest, srcHdr, srcData, options.Chown, options.ChownOpts, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. 
+ // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. 
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} diff --git a/pkg/oc/cli/image/archive/archive_linux.go b/pkg/oc/cli/image/archive/archive_linux.go new file mode 100644 index 000000000000..52351954d645 --- /dev/null +++ b/pkg/oc/cli/image/archive/archive_linux.go @@ -0,0 +1,93 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format archive.WhiteoutFormat) tarWhiteoutConverter { + if format == archive.OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, archive.WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { 
+ return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, archive.WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == archive.WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + originalBase := base[len(archive.WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/pkg/oc/cli/image/archive/archive_other.go b/pkg/oc/cli/image/archive/archive_other.go new file mode 100644 index 000000000000..9069c03a398a --- /dev/null +++ b/pkg/oc/cli/image/archive/archive_other.go @@ -0,0 +1,9 @@ +// +build !linux + +package archive + +import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format archive.WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git 
a/pkg/oc/cli/image/archive/archive_unix.go b/pkg/oc/cli/image/archive/archive_unix.go new file mode 100644 index 000000000000..9eb92b306515 --- /dev/null +++ b/pkg/oc/cli/image/archive/archive_unix.go @@ -0,0 +1,78 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "bufio" + "fmt" + "os" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// runningInUserNS detects whether we are currently running in a user namespace. +// Copied from github.com/opencontainers/runc/libcontainer/system +func runningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. 
+ */ + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if runningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/pkg/oc/cli/image/archive/archive_windows.go b/pkg/oc/cli/image/archive/archive_windows.go new file mode 100644 index 000000000000..e9d83376ea5e --- /dev/null +++ b/pkg/oc/cli/image/archive/archive_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "os" +) + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} diff --git a/pkg/oc/cli/image/archive/time_linux.go b/pkg/oc/cli/image/archive/time_linux.go new file mode 100644 index 000000000000..3448569b1ebb --- /dev/null +++ b/pkg/oc/cli/image/archive/time_linux.go @@ -0,0 +1,16 @@ +package 
archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/pkg/oc/cli/image/archive/time_unsupported.go b/pkg/oc/cli/image/archive/time_unsupported.go new file mode 100644 index 000000000000..e85aac054080 --- /dev/null +++ b/pkg/oc/cli/image/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/pkg/oc/cli/image/extract/extract.go b/pkg/oc/cli/image/extract/extract.go new file mode 100644 index 000000000000..e825a6a7c772 --- /dev/null +++ b/pkg/oc/cli/image/extract/extract.go @@ -0,0 +1,570 @@ +package extract + +import ( + "archive/tar" + "context" + "fmt" + "io" + "math" + "os" + "os/user" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/golang/glog" + "github.com/spf13/cobra" + + "github.com/docker/distribution" + dockerarchive "github.com/docker/docker/pkg/archive" + digest "github.com/opencontainers/go-digest" + + "k8s.io/client-go/rest" + "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + + "github.com/openshift/origin/pkg/image/apis/image/docker10" + imagereference "github.com/openshift/origin/pkg/image/apis/image/reference" + "github.com/openshift/origin/pkg/image/registryclient" + "github.com/openshift/origin/pkg/image/registryclient/dockercredentials" + "github.com/openshift/origin/pkg/oc/cli/image/archive" + 
imagemanifest "github.com/openshift/origin/pkg/oc/cli/image/manifest" +) + +var ( + desc = templates.LongDesc(` + Extract the contents of an image to disk + + Download an image or parts of an image to the filesystem. Allows users to access the + contents of images without requiring a container runtime engine running. + + Pass images to extract as arguments. The --paths flag allows you to define multiple + source to destination directory mappings. The source section may be either a file, a + directory (ends with a '/'), or a file pattern within a directory. The destination + section is a directory to extract to. Both source and destination must be specified. + + If the specified image supports multiple operating systems, the image that matches the + current operating system will be chosen. Otherwise you must pass --filter-by-os to + select the desired image. + + You may further qualify the image by adding a layer selector to the end of the image + string to only extract specific layers within an image. The supported selectors are: + + [] - select the layer at the provided index (zero-indexed) + [,] - select layers by index, exclusive + [~] - select the layer with the matching prefix or return an error + + Negative indices are counted from the end of the list, e.g. [-1] selects the last + layer. + + Experimental: This command is under active development and may change without notice.`) + + example = templates.Examples(` +# Extract the busybox image into the current directory +%[1]s docker.io/library/busybox:latest + +# Extract the busybox image to a temp directory (must exist) +%[1]s docker.io/library/busybox:latest --path /:/tmp/busybox + +# Extract a single file from the image into the current directory +%[1]s docker.io/library/centos:7 --path /bin/bash:. + +# Extract all .repo files from the image's /etc/yum.repos.d/ folder. +%[1]s docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:. 
+ +# Extract the last layer in the image +%[1]s docker.io/library/centos:7[-1] + +# Extract the first three layers of the image +%[1]s docker.io/library/centos:7[:3] + +# Extract the last three layers of the image +%[1]s docker.io/library/centos:7[-3:] +`) +) + +type Options struct { + Mappings []Mapping + + Paths []string + + OnlyFiles bool + + FilterOptions imagemanifest.FilterOptions + + MaxPerRegistry int + + DryRun bool + Insecure bool + + genericclioptions.IOStreams + + ImageMetadataCallback func(m *Mapping, dgst digest.Digest, imageConfig *docker10.DockerImageConfig) +} + +func NewOptions(streams genericclioptions.IOStreams) *Options { + return &Options{ + Paths: []string{"/:."}, + + IOStreams: streams, + MaxPerRegistry: 3, + } +} + +// New creates a new command +func New(name string, streams genericclioptions.IOStreams) *cobra.Command { + o := NewOptions(streams) + + cmd := &cobra.Command{ + Use: "extract", + Short: "Copy files from an image to the filesystem", + Long: desc, + Example: fmt.Sprintf(example, name), + Run: func(c *cobra.Command, args []string) { + kcmdutil.CheckErr(o.Complete(c, args)) + kcmdutil.CheckErr(o.Run()) + }, + } + + flag := cmd.Flags() + o.FilterOptions.Bind(flag) + + flag.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Print the actions that would be taken and exit without writing any contents.") + flag.BoolVar(&o.Insecure, "insecure", o.Insecure, "Allow pull operations to registries to be made over HTTP") + + flag.StringSliceVar(&o.Paths, "path", o.Paths, "Extract only part of an image. Must be SRC:DST where SRC is the path within the image and DST a local directory. 
If not specified the default is to extract everything to the current directory.") + flag.BoolVar(&o.OnlyFiles, "only-files", o.OnlyFiles, "Only extract regular files and directories from the image.") + + flag.IntVar(&o.MaxPerRegistry, "max-per-registry", o.MaxPerRegistry, "Number of concurrent requests allowed per registry.") + + return cmd +} + +type LayerFilter interface { + Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error) +} + +type Mapping struct { + Image string + ImageRef imagereference.DockerImageReference + + LayerFilter LayerFilter + + From string + To string +} + +func parseMappings(images, paths []string) ([]Mapping, error) { + layerFilter := regexp.MustCompile(`^(.*)\[([^\]]*)\](.*)$`) + + var mappings []Mapping + for _, image := range images { + for _, arg := range paths { + parts := strings.SplitN(arg, ":", 2) + var mapping Mapping + switch len(parts) { + case 2: + mapping = Mapping{Image: image, From: parts[0], To: parts[1]} + default: + return nil, fmt.Errorf("--paths must be of the form SRC:DST") + } + if matches := layerFilter.FindStringSubmatch(mapping.Image); len(matches) > 0 { + if len(matches[1]) == 0 || len(matches[2]) == 0 || len(matches[3]) != 0 { + return nil, fmt.Errorf("layer selectors must be of the form IMAGE[\\d:\\d]") + } + mapping.Image = matches[1] + var err error + mapping.LayerFilter, err = parseLayerFilter(matches[2]) + if err != nil { + return nil, err + } + } + if len(mapping.From) > 1 { + mapping.From = strings.TrimPrefix(mapping.From, "/") + } + if len(mapping.To) > 0 { + fi, err := os.Stat(mapping.To) + if err != nil { + return nil, fmt.Errorf("invalid argument: %s", err) + } + if !fi.IsDir() { + return nil, fmt.Errorf("invalid argument: %s is not a directory", arg) + } + } + src, err := imagereference.Parse(mapping.Image) + if err != nil { + return nil, err + } + if len(src.Tag) == 0 && len(src.ID) == 0 { + return nil, fmt.Errorf("source image must point to an image ID or image tag") + } + 
mapping.ImageRef = src + mappings = append(mappings, mapping) + } + } + return mappings, nil +} + +func (o *Options) Complete(cmd *cobra.Command, args []string) error { + if err := o.FilterOptions.Complete(cmd.Flags()); err != nil { + return err + } + + if len(args) == 0 { + return fmt.Errorf("you must specify at least one image to extract as an argument") + } + + var err error + o.Mappings, err = parseMappings(args, o.Paths) + if err != nil { + return err + } + return nil +} + +func (o *Options) Run() error { + preserveOwnership := false + u, err := user.Current() + if err != nil { + fmt.Fprintf(os.Stderr, "warning: Could not load current user information: %v\n", err) + } + if u != nil { + if uid, err := strconv.Atoi(u.Uid); err == nil && uid == 0 { + preserveOwnership = true + } + } + + rt, err := rest.TransportFor(&rest.Config{}) + if err != nil { + return err + } + insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}}) + if err != nil { + return err + } + creds := dockercredentials.NewLocal() + ctx := context.Background() + fromContext := registryclient.NewContext(rt, insecureRT).WithCredentials(creds) + + for _, mapping := range o.Mappings { + from := mapping.ImageRef + + repo, err := fromContext.Repository(ctx, from.DockerClientDefaults().RegistryURL(), from.RepositoryName(), o.Insecure) + if err != nil { + return err + } + + srcManifest, srcDigest, location, err := imagemanifest.FirstManifest(ctx, from, repo, o.FilterOptions.Include) + if err != nil { + return fmt.Errorf("unable to read image %s: %v", from, err) + } + + imageConfig, layers, err := imagemanifest.ManifestToImageConfig(ctx, srcManifest, repo.Blobs(ctx), location) + if err != nil { + return fmt.Errorf("unable to parse image %s: %v", from, err) + } + + var alter alterations + if o.OnlyFiles { + alter = append(alter, filesOnly{}) + } + if len(mapping.From) > 0 { + switch { + case strings.HasSuffix(mapping.From, "/"): + alter = append(alter, 
newCopyFromDirectory(mapping.From)) + default: + name, parent := path.Base(mapping.From), path.Dir(mapping.From) + if name == "." || parent == "." { + return fmt.Errorf("unexpected directory from mapping %s", mapping.From) + } + alter = append(alter, newCopyFromPattern(parent, name)) + } + } + + filteredLayers := layers + if mapping.LayerFilter != nil { + filteredLayers, err = mapping.LayerFilter.Filter(filteredLayers) + if err != nil { + return fmt.Errorf("unable to filter layers for %s: %v", from, err) + } + } + + glog.V(5).Infof("Extracting from layers\n:%#v", filteredLayers) + + for i := range filteredLayers { + layer := &layers[i] + + err := func() error { + fromBlobs := repo.Blobs(ctx) + + // source + r, err := fromBlobs.Open(ctx, layer.Digest) + if err != nil { + return fmt.Errorf("unable to access the source layer %s: %v", layer.Digest, err) + } + defer r.Close() + + options := &archive.TarOptions{ + AlterHeaders: alter, + Chown: preserveOwnership, + } + + if o.DryRun { + return printLayer(os.Stdout, r, mapping.To, options) + } + + glog.V(4).Infof("Extracting layer %s with options %#v", layer.Digest, options) + if _, err := archive.ApplyLayer(mapping.To, r, options); err != nil { + return err + } + return nil + }() + if err != nil { + return err + } + } + + if o.ImageMetadataCallback != nil { + o.ImageMetadataCallback(&mapping, srcDigest, imageConfig) + } + } + return nil +} + +func printLayer(w io.Writer, r io.Reader, path string, options *archive.TarOptions) error { + rc, err := dockerarchive.DecompressStream(r) + if err != nil { + return err + } + defer rc.Close() + tr := tar.NewReader(rc) + for { + hdr, err := tr.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + if options.AlterHeaders != nil { + ok, err := options.AlterHeaders.Alter(hdr) + if err != nil { + return err + } + if !ok { + continue + } + } + if len(hdr.Name) == 0 { + continue + } + switch hdr.Typeflag { + case tar.TypeDir: + fmt.Fprintf(w, "Creating directory 
%s\n", filepath.Join(path, hdr.Name)) + case tar.TypeReg, tar.TypeRegA: + fmt.Fprintf(w, "Creating file %s\n", filepath.Join(path, hdr.Name)) + case tar.TypeLink: + fmt.Fprintf(w, "Link %s to %s\n", hdr.Name, filepath.Join(path, hdr.Linkname)) + case tar.TypeSymlink: + fmt.Fprintf(w, "Symlink %s to %s\n", hdr.Name, filepath.Join(path, hdr.Linkname)) + default: + fmt.Fprintf(w, "Extracting %s with type %0x\n", filepath.Join(path, hdr.Name), hdr.Typeflag) + } + } +} + +type alterations []archive.AlterHeader + +func (a alterations) Alter(hdr *tar.Header) (bool, error) { + for _, item := range a { + ok, err := item.Alter(hdr) + if err != nil { + return false, err + } + if !ok { + return false, nil + } + } + return true, nil +} + +type copyFromDirectory struct { + From string +} + +func newCopyFromDirectory(from string) archive.AlterHeader { + if !strings.HasSuffix(from, "/") { + from = from + "/" + } + return ©FromDirectory{From: from} +} + +func (n *copyFromDirectory) Alter(hdr *tar.Header) (bool, error) { + return changeTarEntryParent(hdr, n.From), nil +} + +type copyFromPattern struct { + Base string + Name string +} + +func newCopyFromPattern(dir, name string) archive.AlterHeader { + if !strings.HasSuffix(dir, "/") { + dir = dir + "/" + } + return ©FromPattern{Base: dir, Name: name} +} + +func (n *copyFromPattern) Alter(hdr *tar.Header) (bool, error) { + if !changeTarEntryParent(hdr, n.Base) { + return false, nil + } + matchName := hdr.Name + if i := strings.Index(matchName, "/"); i != -1 { + matchName = matchName[:i] + } + if ok, err := path.Match(n.Name, matchName); !ok || err != nil { + glog.V(5).Infof("Excluded %s due to filter %s", hdr.Name, n.Name) + return false, err + } + return true, nil +} + +func changeTarEntryParent(hdr *tar.Header, from string) bool { + if !strings.HasPrefix(hdr.Name, from) { + return false + } + if len(hdr.Linkname) > 0 { + if strings.HasPrefix(hdr.Linkname, from) { + hdr.Linkname = strings.TrimPrefix(hdr.Linkname, from) + 
glog.V(5).Infof("Updated link to %s", hdr.Linkname) + } else { + glog.V(4).Infof("Name %s won't correctly point to %s outside of %s", hdr.Name, hdr.Linkname, from) + } + } + hdr.Name = strings.TrimPrefix(hdr.Name, from) + glog.V(5).Infof("Updated name %s", hdr.Name) + return true +} + +type filesOnly struct { +} + +func (_ filesOnly) Alter(hdr *tar.Header) (bool, error) { + switch hdr.Typeflag { + case tar.TypeReg, tar.TypeRegA, tar.TypeDir: + return true, nil + default: + return false, nil + } +} + +func parseLayerFilter(s string) (LayerFilter, error) { + if strings.HasPrefix(s, "~") { + s = s[1:] + return &prefixLayerFilter{Prefix: s}, nil + } + + if strings.Contains(s, ":") { + l := &indexLayerFilter{From: 0, To: math.MaxInt32} + parts := strings.SplitN(s, ":", 2) + if len(parts[0]) > 0 { + i, err := strconv.Atoi(parts[0]) + if err != nil { + return nil, fmt.Errorf("[from:to] must have valid numbers: %v", err) + } + l.From = int32(i) + } + if len(parts[1]) > 0 { + i, err := strconv.Atoi(parts[1]) + if err != nil { + return nil, fmt.Errorf("[from:to] must have valid numbers: %v", err) + } + l.To = int32(i) + } + if l.To > 0 && l.To < l.From { + return nil, fmt.Errorf("[from:to] to must be larger than from") + } + return l, nil + } + + if i, err := strconv.Atoi(s); err == nil { + l := NewPositionLayerFilter(int32(i)) + return l, nil + } + + return nil, fmt.Errorf("the layer selector [%s] is not valid, must be [from:to], [index], or [~digest]", s) +} + +type prefixLayerFilter struct { + Prefix string +} + +func (s *prefixLayerFilter) Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error) { + var filtered []distribution.Descriptor + for _, d := range layers { + if strings.HasPrefix(d.Digest.String(), s.Prefix) { + filtered = append(filtered, d) + } + } + if len(filtered) == 0 { + return nil, fmt.Errorf("no layers start with '%s'", s.Prefix) + } + if len(filtered) > 1 { + return nil, fmt.Errorf("multiple layers start with '%s', you must be more 
specific", s.Prefix) + } + return filtered, nil +} + +type indexLayerFilter struct { + From int32 + To int32 +} + +func (s *indexLayerFilter) Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error) { + l := int32(len(layers)) + from := s.From + to := s.To + if from < 0 { + from = l + from + } + if to < 0 { + to = l + to + } + if to > l { + to = l + } + if from < 0 || to < 0 || from >= l { + if s.To == math.MaxInt32 { + return nil, fmt.Errorf("tried to select [%d:], but image only has %d layers", s.From, l) + } + return nil, fmt.Errorf("tried to select [%d:%d], but image only has %d layers", s.From, s.To, l) + } + if to < from { + to, from = from, to + } + return layers[from:to], nil +} + +type positionLayerFilter struct { + At int32 +} + +func NewPositionLayerFilter(at int32) LayerFilter { + return &positionLayerFilter{at} +} + +func (s *positionLayerFilter) Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error) { + l := int32(len(layers)) + at := s.At + if at < 0 { + at = l + s.At + } + if at < 0 || at >= l { + return nil, fmt.Errorf("tried to select layer %d, but image only has %d layers", s.At, l) + } + return []distribution.Descriptor{layers[at]}, nil +} diff --git a/pkg/oc/cli/image/image.go b/pkg/oc/cli/image/image.go index 36deb78ba53c..33088b367dc1 100644 --- a/pkg/oc/cli/image/image.go +++ b/pkg/oc/cli/image/image.go @@ -11,6 +11,7 @@ import ( "github.com/openshift/origin/pkg/cmd/templates" "github.com/openshift/origin/pkg/oc/cli/image/append" + "github.com/openshift/origin/pkg/oc/cli/image/extract" "github.com/openshift/origin/pkg/oc/cli/image/mirror" ) @@ -37,6 +38,7 @@ func NewCmdImage(fullName string, f kcmdutil.Factory, streams genericclioptions. 
// FilterOptions assist in filtering out unneeded manifests from ManifestList objects.
type FilterOptions struct {
	// FilterByOS is the raw regular expression supplied via --filter-by-os.
	FilterByOS string
	// DefaultOSFilter records that no explicit filter was given and the
	// current-platform default was applied instead.
	DefaultOSFilter bool
	// OSFilter is the compiled form of FilterByOS, nil when unset.
	OSFilter *regexp.Regexp
}

// Bind adds the options to the flag set.
// NOTE(review): the placeholder text "'/[/]'" in the help string looks like
// extraction-garbled markup (likely "<os>/<arch>[/<variant>]") — confirm
// against the original source before release.
func (o *FilterOptions) Bind(flags *pflag.FlagSet) {
	flags.StringVar(&o.FilterByOS, "filter-by-os", o.FilterByOS, "A regular expression to control which images are mirrored. Images will be passed as '/[/]'.")
}
+func (o *FilterOptions) Complete(flags *pflag.FlagSet) error { + pattern := o.FilterByOS + if len(pattern) == 0 && !flags.Changed("filter-by-os") { + o.DefaultOSFilter = true + pattern = regexp.QuoteMeta(fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)) + } + if len(pattern) > 0 { + re, err := regexp.Compile(pattern) + if err != nil { + return fmt.Errorf("--filter-by-os was not a valid regular expression: %v", err) + } + o.OSFilter = re + } + return nil +} + +// Include returns true if the provided manifest should be included, or the first image if the user didn't alter the +// default selection and there is only one image. +func (o *FilterOptions) Include(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool { + if o.OSFilter == nil { + return true + } + if o.DefaultOSFilter && !hasMultiple { + return true + } + if len(d.Platform.Variant) > 0 { + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s/%s", d.Platform.OS, d.Platform.Architecture, d.Platform.Variant)) + } + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s", d.Platform.OS, d.Platform.Architecture)) +} + +// IncludeAll returns true if the provided manifest matches the filter, or all if there was no filter. +func (o *FilterOptions) IncludeAll(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool { + if o.OSFilter == nil { + return true + } + if len(d.Platform.Variant) > 0 { + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s/%s", d.Platform.OS, d.Platform.Architecture, d.Platform.Variant)) + } + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s", d.Platform.OS, d.Platform.Architecture)) +} + +type FilterFunc func(*manifestlist.ManifestDescriptor, bool) bool + +// PreferManifestList specifically requests a manifest list first +var PreferManifestList = distribution.WithManifestMediaTypes([]string{ + manifestlist.MediaTypeManifestList, + schema2.MediaTypeManifest, +}) + +// FirstManifest returns the first manifest at the request location that matches the filter function. 
// FirstManifest returns the first manifest at the request location that matches the filter function.
func FirstManifest(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository, filterFn FilterFunc) (distribution.Manifest, digest.Digest, string, error) {
	var srcDigest digest.Digest
	if len(from.Tag) > 0 {
		// Resolve the tag to its current digest.
		desc, err := repo.Tags(ctx).Get(ctx, from.Tag)
		if err != nil {
			return nil, "", "", err
		}
		srcDigest = desc.Digest
	} else {
		// The reference already pins a digest (image ID).
		srcDigest = digest.Digest(from.ID)
	}
	manifests, err := repo.Manifests(ctx)
	if err != nil {
		return nil, "", "", err
	}
	// Ask for a manifest list first so multi-platform images can be filtered.
	srcManifest, err := manifests.Get(ctx, srcDigest, PreferManifestList)
	if err != nil {
		return nil, "", "", err
	}

	originalSrcDigest := srcDigest
	// If this was a manifest list, narrow it with filterFn; srcManifest and
	// srcDigest then describe the chosen sub-manifest.
	srcManifests, srcManifest, srcDigest, err := ProcessManifestList(ctx, srcDigest, srcManifest, manifests, from, filterFn)
	if err != nil {
		return nil, "", "", err
	}
	if len(srcManifests) == 0 {
		return nil, "", "", fmt.Errorf("filtered all images from %s", from)
	}

	// location is a human-readable description used in later error messages.
	var location string
	if srcDigest == originalSrcDigest {
		location = fmt.Sprintf("manifest %s", srcDigest)
	} else {
		location = fmt.Sprintf("manifest %s in manifest list %s", srcDigest, originalSrcDigest)
	}
	return srcManifest, srcDigest, location, nil
}

// ManifestToImageConfig takes an image manifest and converts it into a structured object.
func ManifestToImageConfig(ctx context.Context, srcManifest distribution.Manifest, blobs distribution.BlobService, location string) (*docker10.DockerImageConfig, []distribution.Descriptor, error) {
	switch t := srcManifest.(type) {
	case *schema2.DeserializedManifest:
		// schema2: the image config is a separate blob referenced by the
		// manifest; fetch and decode it.
		if t.Config.MediaType != schema2.MediaTypeImageConfig {
			return nil, nil, fmt.Errorf("%s does not have the expected image configuration media type: %s", location, t.Config.MediaType)
		}
		configJSON, err := blobs.Get(ctx, t.Config.Digest)
		if err != nil {
			return nil, nil, fmt.Errorf("cannot retrieve image configuration for %s: %v", location, err)
		}
		glog.V(4).Infof("Raw image config json:\n%s", string(configJSON))
		config := &docker10.DockerImageConfig{}
		if err := json.Unmarshal(configJSON, &config); err != nil {
			return nil, nil, fmt.Errorf("unable to parse image configuration: %v", err)
		}

		// Recompute the total size as the sum of the layer sizes.
		base := config
		layers := t.Layers
		base.Size = 0
		for _, layer := range t.Layers {
			base.Size += layer.Size
		}

		return base, layers, nil

	case *schema1.SignedManifest:
		// schema1: the config is embedded in the first v1Compatibility
		// history entry and must be converted to the modern form.
		if glog.V(4) {
			_, configJSON, _ := srcManifest.Payload()
			glog.Infof("Raw image config json:\n%s", string(configJSON))
		}
		if len(t.History) == 0 {
			return nil, nil, fmt.Errorf("input image is in an unknown format: no v1Compatibility history")
		}
		config := &docker10.DockerV1CompatibilityImage{}
		if err := json.Unmarshal([]byte(t.History[0].V1Compatibility), &config); err != nil {
			return nil, nil, err
		}

		base := &docker10.DockerImageConfig{}
		if err := docker10.Convert_DockerV1CompatibilityImage_to_DockerImageConfig(config, base); err != nil {
			return nil, nil, err
		}

		// schema1 layers are in reverse order
		layers := make([]distribution.Descriptor, 0, len(t.FSLayers))
		for i := len(t.FSLayers) - 1; i >= 0; i-- {
			layer := distribution.Descriptor{
				MediaType: schema2.MediaTypeLayer,
				Digest:    t.FSLayers[i].BlobSum,
				// size must be reconstructed from the blobs
			}
			// we must reconstruct the tar sum from the blobs
			add.AddLayerToConfig(base, layer, "")
			layers = append(layers, layer)
		}

		return base, layers, nil

	default:
		return nil, nil, fmt.Errorf("unknown image manifest of type %T from %s", srcManifest, location)
	}
}

// ProcessManifestList narrows a manifest list with filterFn and resolves the
// child manifests it references. For a non-list manifest it returns the input
// unchanged. When filtering reduces the list to a single manifest, that
// manifest (and its recomputed digest) is returned directly.
func ProcessManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imagereference.DockerImageReference, filterFn FilterFunc) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) {
	var srcManifests []distribution.Manifest
	switch t := srcManifest.(type) {
	case *manifestlist.DeserializedManifestList:
		manifestDigest := srcDigest
		manifestList := t

		filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests))
		for _, manifest := range t.Manifests {
			if !filterFn(&manifest, len(t.Manifests) > 1) {
				glog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref)
				continue
			}
			glog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref)
			filtered = append(filtered, manifest)
		}

		if len(filtered) == 0 {
			// Everything was filtered away; callers check for the empty slice.
			return nil, nil, "", nil
		}

		// if we're filtering the manifest list, update the source manifest and digest
		if len(filtered) != len(t.Manifests) {
			var err error
			t, err = manifestlist.FromDescriptors(filtered)
			if err != nil {
				return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err)
			}
			_, body, err := t.Payload()
			if err != nil {
				return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err)
			}
			manifestList = t
			// The filtered list has new content, hence a new digest.
			manifestDigest = srcDigest.Algorithm().FromBytes(body)
			glog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body)
		}

		// Fetch every child manifest that survived filtering.
		for i, manifest := range t.Manifests {
			childManifest, err := manifests.Get(ctx, manifest.Digest, distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest}))
			if err != nil {
				return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err)
			}
			srcManifests = append(srcManifests, childManifest)
		}

		switch {
		case len(srcManifests) == 1:
			// A single survivor replaces the list entirely.
			_, body, err := srcManifests[0].Payload()
			if err != nil {
				return nil, nil, "", fmt.Errorf("unable to convert source image %s manifest list to single manifest: %v", ref, err)
			}
			manifestDigest := srcDigest.Algorithm().FromBytes(body)
			glog.V(5).Infof("Used only one manifest from the list %s", manifestDigest)
			return srcManifests, srcManifests[0], manifestDigest, nil
		default:
			return append(srcManifests, manifestList), manifestList, manifestDigest, nil
		}

	default:
		// Not a manifest list: pass through unchanged.
		return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil
	}
}

// TODO: remove when quay.io switches to v2 schema
//
// PutManifestInCompatibleSchema pushes srcManifest to toManifests and, when
// the registry rejects it as invalid, retries after down-converting a
// schema2 manifest to signed schema1.
func PutManifestInCompatibleSchema(
	ctx context.Context,
	srcManifest distribution.Manifest,
	tag string,
	toManifests distribution.ManifestService,
	// supports schema2 -> schema1 downconversion
	blobs distribution.BlobService,
	ref reference.Named,
) (digest.Digest, error) {
	var options []distribution.ManifestServiceOption
	if len(tag) > 0 {
		glog.V(5).Infof("Put manifest %s:%s", ref, tag)
		options = []distribution.ManifestServiceOption{distribution.WithTag(tag)}
	} else {
		glog.V(5).Infof("Put manifest %s", ref)
	}
	toDigest, err := toManifests.Put(ctx, srcManifest, options...)
	if err == nil {
		return toDigest, nil
	}
	errs, ok := err.(errcode.Errors)
	if !ok || len(errs) == 0 {
		return toDigest, err
	}
	// NOTE(review): this local shadows the imported errcode package; the
	// package is not referenced again below, so this is legal but fragile.
	errcode, ok := errs[0].(errcode.Error)
	if !ok || errcode.ErrorCode() != v2.ErrorCodeManifestInvalid {
		return toDigest, err
	}
	// try downconverting to v2-schema1
	schema2Manifest, ok := srcManifest.(*schema2.DeserializedManifest)
	if !ok {
		return toDigest, err
	}
	tagRef, tagErr := reference.WithTag(ref, tag)
	if tagErr != nil {
		return toDigest, err
	}
	glog.V(5).Infof("Registry reported invalid manifest error, attempting to convert to v2schema1 as ref %s", tagRef)
	schema1Manifest, convertErr := convertToSchema1(ctx, blobs, schema2Manifest, tagRef)
	if convertErr != nil {
		// Conversion failed: surface the original push error.
		return toDigest, err
	}
	if glog.V(6) {
		_, data, _ := schema1Manifest.Payload()
		glog.Infof("Converted to v2schema1\n%s", string(data))
	}
	return toManifests.Put(ctx, schema1Manifest, distribution.WithTag(tag))
}

// TODO: remove when quay.io switches to v2 schema
//
// convertToSchema1 rebuilds a schema2 manifest as a signed schema1 manifest
// using the image config blob and a locally generated signing key.
func convertToSchema1(ctx context.Context, blobs distribution.BlobService, schema2Manifest *schema2.DeserializedManifest, ref reference.Named) (distribution.Manifest, error) {
	targetDescriptor := schema2Manifest.Target()
	configJSON, err := blobs.Get(ctx, targetDescriptor.Digest)
	if err != nil {
		return nil, err
	}
	trustKey, err := loadPrivateKey()
	if err != nil {
		return nil, err
	}
	builder := schema1.NewConfigManifestBuilder(blobs, trustKey, ref, configJSON)
	for _, d := range schema2Manifest.Layers {
		if err := builder.AppendReference(d); err != nil {
			return nil, err
		}
	}
	manifest, err := builder.Build(ctx)
	if err != nil {
		return nil, err
	}
	return manifest, nil
}

var (
	// privateKeyLock guards lazy initialization of privateKey.
	privateKeyLock sync.Mutex
	// privateKey is the cached schema1 signing key.
	privateKey libtrust.PrivateKey
)

// TODO: remove when quay.io switches to v2 schema
func loadPrivateKey() (libtrust.PrivateKey, error) {
	privateKeyLock.Lock()
	defer privateKeyLock.Unlock()
	if privateKey != nil {
		return
privateKey, nil + } + trustKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + privateKey = trustKey + return privateKey, nil +} diff --git a/pkg/oc/cli/image/mirror/manifest.go b/pkg/oc/cli/image/mirror/manifest.go deleted file mode 100644 index 081024d0005c..000000000000 --- a/pkg/oc/cli/image/mirror/manifest.go +++ /dev/null @@ -1,178 +0,0 @@ -package mirror - -import ( - "context" - "fmt" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - - "github.com/docker/libtrust" - "github.com/golang/glog" - digest "github.com/opencontainers/go-digest" - - imagereference "github.com/openshift/origin/pkg/image/apis/image/reference" -) - -func processManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imagereference.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor, bool) bool) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) { - var srcManifests []distribution.Manifest - switch t := srcManifest.(type) { - case *manifestlist.DeserializedManifestList: - manifestDigest := srcDigest - manifestList := t - - filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests)) - for _, manifest := range t.Manifests { - if !filterFn(&manifest, len(t.Manifests) > 1) { - glog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref) - continue - } - glog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref) - filtered = 
append(filtered, manifest) - } - - if len(filtered) == 0 { - return nil, nil, "", nil - } - - // if we're filtering the manifest list, update the source manifest and digest - if len(filtered) != len(t.Manifests) { - var err error - t, err = manifestlist.FromDescriptors(filtered) - if err != nil { - return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err) - } - _, body, err := t.Payload() - if err != nil { - return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err) - } - manifestList = t - manifestDigest = srcDigest.Algorithm().FromBytes(body) - glog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body) - } - - for i, manifest := range t.Manifests { - childManifest, err := manifests.Get(ctx, manifest.Digest, distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest})) - if err != nil { - return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err) - } - srcManifests = append(srcManifests, childManifest) - } - - switch { - case len(srcManifests) == 1: - _, body, err := srcManifests[0].Payload() - if err != nil { - return nil, nil, "", fmt.Errorf("unable to convert source image %s manifest list to single manifest: %v", ref, err) - } - manifestDigest := srcDigest.Algorithm().FromBytes(body) - glog.V(5).Infof("Used only one manifest from the list %s", manifestDigest) - return srcManifests, srcManifests[0], manifestDigest, nil - default: - return append(srcManifests, manifestList), manifestList, manifestDigest, nil - } - - default: - return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil - } -} - -// TDOO: remove when quay.io switches to v2 schema -func putManifestInCompatibleSchema( - ctx context.Context, - srcManifest distribution.Manifest, - tag string, - toManifests distribution.ManifestService, - // supports schema2 -> 
schema1 downconversion - blobs distribution.BlobService, - ref reference.Named, -) (digest.Digest, error) { - var options []distribution.ManifestServiceOption - if len(tag) > 0 { - glog.V(5).Infof("Put manifest %s:%s", ref, tag) - options = []distribution.ManifestServiceOption{distribution.WithTag(tag)} - } else { - glog.V(5).Infof("Put manifest %s", ref) - } - toDigest, err := toManifests.Put(ctx, srcManifest, options...) - if err == nil { - return toDigest, nil - } - errs, ok := err.(errcode.Errors) - if !ok || len(errs) == 0 { - return toDigest, err - } - errcode, ok := errs[0].(errcode.Error) - if !ok || errcode.ErrorCode() != v2.ErrorCodeManifestInvalid { - return toDigest, err - } - // try downconverting to v2-schema1 - schema2Manifest, ok := srcManifest.(*schema2.DeserializedManifest) - if !ok { - return toDigest, err - } - tagRef, tagErr := reference.WithTag(ref, tag) - if tagErr != nil { - return toDigest, err - } - glog.V(5).Infof("Registry reported invalid manifest error, attempting to convert to v2schema1 as ref %s", tagRef) - schema1Manifest, convertErr := convertToSchema1(ctx, blobs, schema2Manifest, tagRef) - if convertErr != nil { - return toDigest, err - } - if glog.V(6) { - _, data, _ := schema1Manifest.Payload() - glog.Infof("Converted to v2schema1\n%s", string(data)) - } - return toManifests.Put(ctx, schema1Manifest, distribution.WithTag(tag)) -} - -// TDOO: remove when quay.io switches to v2 schema -func convertToSchema1(ctx context.Context, blobs distribution.BlobService, schema2Manifest *schema2.DeserializedManifest, ref reference.Named) (distribution.Manifest, error) { - targetDescriptor := schema2Manifest.Target() - configJSON, err := blobs.Get(ctx, targetDescriptor.Digest) - if err != nil { - return nil, err - } - trustKey, err := loadPrivateKey() - if err != nil { - return nil, err - } - builder := schema1.NewConfigManifestBuilder(blobs, trustKey, ref, configJSON) - for _, d := range schema2Manifest.Layers { - if err := 
builder.AppendReference(d); err != nil { - return nil, err - } - } - manifest, err := builder.Build(ctx) - if err != nil { - return nil, err - } - return manifest, nil -} - -var ( - privateKeyLock sync.Mutex - privateKey libtrust.PrivateKey -) - -// TDOO: remove when quay.io switches to v2 schema -func loadPrivateKey() (libtrust.PrivateKey, error) { - privateKeyLock.Lock() - defer privateKeyLock.Unlock() - if privateKey != nil { - return privateKey, nil - } - trustKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err - } - privateKey = trustKey - return privateKey, nil -} diff --git a/pkg/oc/cli/image/mirror/mirror.go b/pkg/oc/cli/image/mirror/mirror.go index fb009b21cc51..ecc61093e661 100644 --- a/pkg/oc/cli/image/mirror/mirror.go +++ b/pkg/oc/cli/image/mirror/mirror.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "regexp" "time" "github.com/docker/distribution" @@ -27,6 +26,8 @@ import ( imagereference "github.com/openshift/origin/pkg/image/apis/image/reference" "github.com/openshift/origin/pkg/image/registryclient" "github.com/openshift/origin/pkg/image/registryclient/dockercredentials" + imagemanifest "github.com/openshift/origin/pkg/oc/cli/image/manifest" + "github.com/openshift/origin/pkg/oc/cli/image/workqueue" ) var ( @@ -76,9 +77,8 @@ var ( type MirrorImageOptions struct { Mappings []Mapping - OSFilter *regexp.Regexp - FilterByOS string + FilterOptions imagemanifest.FilterOptions DryRun bool Insecure bool @@ -102,12 +102,6 @@ func NewMirrorImageOptions(streams genericclioptions.IOStreams) *MirrorImageOpti } } -// schema2ManifestOnly specifically requests a manifest list first -var schema2ManifestOnly = distribution.WithManifestMediaTypes([]string{ - manifestlist.MediaTypeManifestList, - schema2.MediaTypeManifest, -}) - // NewCommandMirrorImage copies images from one location to another. 
func NewCmdMirrorImage(name string, streams genericclioptions.IOStreams) *cobra.Command { o := NewMirrorImageOptions(streams) @@ -118,17 +112,18 @@ func NewCmdMirrorImage(name string, streams genericclioptions.IOStreams) *cobra. Long: mirrorDesc, Example: fmt.Sprintf(mirrorExample, name+" mirror"), Run: func(c *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(args)) + kcmdutil.CheckErr(o.Complete(c, args)) kcmdutil.CheckErr(o.Run()) }, } flag := cmd.Flags() + o.FilterOptions.Bind(flag) + flag.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Print the actions that would be taken and exit without writing to the destinations.") flag.BoolVar(&o.Insecure, "insecure", o.Insecure, "Allow push and pull operations to registries to be made over HTTP") flag.BoolVar(&o.SkipMount, "skip-mount", o.SkipMount, "Always push layers instead of cross-mounting them") flag.BoolVar(&o.SkipMultipleScopes, "skip-multiple-scopes", o.SkipMultipleScopes, "Some registries do not support multiple scopes passed to the registry login.") - flag.StringVar(&o.FilterByOS, "filter-by-os", o.FilterByOS, "A regular expression to control which images are mirrored. Images will be passed as '/[/]'.") flag.BoolVar(&o.Force, "force", o.Force, "Attempt to write all layers and manifests even if they exist in the remote repository.") flag.IntVar(&o.MaxRegistry, "max-registry", 4, "Number of concurrent registries to connect to at any one time.") flag.IntVar(&o.MaxPerRegistry, "max-per-registry", 6, "Number of concurrent requests allowed per registry.") @@ -138,7 +133,11 @@ func NewCmdMirrorImage(name string, streams genericclioptions.IOStreams) *cobra. 
return cmd } -func (o *MirrorImageOptions) Complete(args []string) error { +func (o *MirrorImageOptions) Complete(cmd *cobra.Command, args []string) error { + if err := o.FilterOptions.Complete(cmd.Flags()); err != nil { + return err + } + overlap := make(map[string]string) var err error @@ -164,15 +163,6 @@ func (o *MirrorImageOptions) Complete(args []string) error { } } - pattern := o.FilterByOS - if len(pattern) > 0 { - re, err := regexp.Compile(pattern) - if err != nil { - return fmt.Errorf("--filter-by-os was not a valid regular expression: %v", err) - } - o.OSFilter = re - } - return nil } @@ -192,17 +182,6 @@ func (o *MirrorImageOptions) Repository(ctx context.Context, context *registrycl } } -// includeDescriptor returns true if the provided manifest should be included. -func (o *MirrorImageOptions) includeDescriptor(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool { - if o.OSFilter == nil { - return true - } - if len(d.Platform.Variant) > 0 { - return o.OSFilter.MatchString(fmt.Sprintf("%s/%s/%s", d.Platform.OS, d.Platform.Architecture, d.Platform.Variant)) - } - return o.OSFilter.MatchString(fmt.Sprintf("%s/%s", d.Platform.OS, d.Platform.Architecture)) -} - func (o *MirrorImageOptions) Run() error { start := time.Now() p, err := o.plan() @@ -232,10 +211,10 @@ func (o *MirrorImageOptions) Run() error { stopCh := make(chan struct{}) defer close(stopCh) - q := newWorkQueue(o.MaxRegistry, stopCh) - registryWorkers := make(map[string]*workQueue) + q := workqueue.New(o.MaxRegistry, stopCh) + registryWorkers := make(map[string]workqueue.Interface) for name := range p.RegistryNames() { - registryWorkers[name] = newWorkQueue(o.MaxPerRegistry, stopCh) + registryWorkers[name] = workqueue.New(o.MaxPerRegistry, stopCh) } next := time.Now() @@ -247,12 +226,12 @@ func (o *MirrorImageOptions) Run() error { ctx := apirequest.NewContext() for j := range work.phases { phase := &work.phases[j] - q.Batch(func(w Work) { + q.Batch(func(w workqueue.Work) { for i := 
range phase.independent { unit := phase.independent[i] w.Parallel(func() { // upload blobs - registryWorkers[unit.registry.name].Batch(func(w Work) { + registryWorkers[unit.registry.name].Batch(func(w workqueue.Work) { for i := range unit.repository.blobs { op := unit.repository.blobs[i] for digestString := range op.blobs { @@ -318,11 +297,11 @@ func (o *MirrorImageOptions) plan() (*plan, error) { stopCh := make(chan struct{}) defer close(stopCh) - q := newWorkQueue(o.MaxRegistry, stopCh) - registryWorkers := make(map[string]*workQueue) + q := workqueue.New(o.MaxRegistry, stopCh) + registryWorkers := make(map[string]workqueue.Interface) for name := range tree { if _, ok := registryWorkers[name.registry]; !ok { - registryWorkers[name.registry] = newWorkQueue(o.MaxPerRegistry, stopCh) + registryWorkers[name.registry] = workqueue.New(o.MaxPerRegistry, stopCh) } } @@ -330,7 +309,7 @@ func (o *MirrorImageOptions) plan() (*plan, error) { for name := range tree { src := tree[name] - q.Queue(func(_ Work) { + q.Queue(func(_ workqueue.Work) { srcRepo, err := fromContext.Repository(ctx, src.ref.DockerClientDefaults().RegistryURL(), src.ref.RepositoryName(), o.Insecure) if err != nil { plan.AddError(retrieverError{err: fmt.Errorf("unable to connect to %s: %v", src.ref, err), src: src.ref}) @@ -342,7 +321,7 @@ func (o *MirrorImageOptions) plan() (*plan, error) { return } rq := registryWorkers[name.registry] - rq.Batch(func(w Work) { + rq.Batch(func(w workqueue.Work) { // convert source tags to digests for tag := range src.tags { srcTag, pushTargets := tag, src.tags[tag] @@ -361,13 +340,13 @@ func (o *MirrorImageOptions) plan() (*plan, error) { canonicalFrom := srcRepo.Named() - rq.Queue(func(w Work) { + rq.Queue(func(w workqueue.Work) { for key := range src.digests { srcDigestString, pushTargets := key, src.digests[key] w.Parallel(func() { // load the manifest srcDigest := godigest.Digest(srcDigestString) - srcManifest, err := manifests.Get(ctx, godigest.Digest(srcDigest), 
schema2ManifestOnly) + srcManifest, err := manifests.Get(ctx, godigest.Digest(srcDigest), imagemanifest.PreferManifestList) if err != nil { plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s manifest %s: %v", src.ref, srcDigest, err)}) return @@ -375,7 +354,7 @@ func (o *MirrorImageOptions) plan() (*plan, error) { // filter or load manifest list as appropriate originalSrcDigest := srcDigest - srcManifests, srcManifest, srcDigest, err := processManifestList(ctx, srcDigest, srcManifest, manifests, src.ref, o.includeDescriptor) + srcManifests, srcManifest, srcDigest, err := imagemanifest.ProcessManifestList(ctx, srcDigest, srcManifest, manifests, src.ref, o.FilterOptions.IncludeAll) if err != nil { plan.AddError(retrieverError{src: src.ref, err: err}) return @@ -599,7 +578,7 @@ func copyManifests( panic(fmt.Sprintf("empty source manifest for %s", srcDigest)) } for _, tag := range tags.List() { - toDigest, err := putManifestInCompatibleSchema(ctx, srcManifest, tag, plan.to, plan.toBlobs, ref) + toDigest, err := imagemanifest.PutManifestInCompatibleSchema(ctx, srcManifest, tag, plan.to, plan.toBlobs, ref) if err != nil { errs = append(errs, fmt.Errorf("unable to push manifest to %s: %v", plan.toRef, err)) continue @@ -622,7 +601,7 @@ func copyManifests( if !ok { panic(fmt.Sprintf("empty source manifest for %s", srcDigest)) } - toDigest, err := putManifestInCompatibleSchema(ctx, srcManifest, "", plan.to, plan.toBlobs, ref) + toDigest, err := imagemanifest.PutManifestInCompatibleSchema(ctx, srcManifest, "", plan.to, plan.toBlobs, ref) if err != nil { errs = append(errs, fmt.Errorf("unable to push manifest to %s: %v", plan.toRef, err)) continue diff --git a/pkg/oc/cli/image/mirror/workqueue.go b/pkg/oc/cli/image/mirror/workqueue.go deleted file mode 100644 index 6587bff17816..000000000000 --- a/pkg/oc/cli/image/mirror/workqueue.go +++ /dev/null @@ -1,131 +0,0 @@ -package mirror - -import ( - "sync" - - 
"github.com/golang/glog" -) - -type workQueue struct { - ch chan workUnit - wg *sync.WaitGroup -} - -func newWorkQueue(workers int, stopCh <-chan struct{}) *workQueue { - q := &workQueue{ - ch: make(chan workUnit, 100), - wg: &sync.WaitGroup{}, - } - go q.run(workers, stopCh) - return q -} - -func (q *workQueue) run(workers int, stopCh <-chan struct{}) { - for i := 0; i < workers; i++ { - go func(i int) { - defer glog.V(4).Infof("worker %d stopping", i) - for { - select { - case work, ok := <-q.ch: - if !ok { - return - } - work.fn() - work.wg.Done() - case <-stopCh: - return - } - } - }(i) - } - <-stopCh -} - -func (q *workQueue) Batch(fn func(Work)) { - w := &worker{ - wg: &sync.WaitGroup{}, - ch: q.ch, - } - fn(w) - w.wg.Wait() -} - -func (q *workQueue) Try(fn func(Try)) error { - w := &worker{ - wg: &sync.WaitGroup{}, - ch: q.ch, - err: make(chan error), - } - fn(w) - return w.FirstError() -} - -func (q *workQueue) Queue(fn func(Work)) { - w := &worker{ - wg: q.wg, - ch: q.ch, - } - fn(w) -} - -func (q *workQueue) Done() { - q.wg.Wait() -} - -type workUnit struct { - fn func() - wg *sync.WaitGroup -} - -type Work interface { - Parallel(fn func()) -} - -type Try interface { - Try(fn func() error) -} - -type worker struct { - wg *sync.WaitGroup - ch chan workUnit - err chan error -} - -func (w *worker) FirstError() error { - done := make(chan struct{}) - go func() { - w.wg.Wait() - close(done) - }() - for { - select { - case err := <-w.err: - if err != nil { - return err - } - case <-done: - return nil - } - } -} - -func (w *worker) Parallel(fn func()) { - w.wg.Add(1) - w.ch <- workUnit{wg: w.wg, fn: fn} -} - -func (w *worker) Try(fn func() error) { - w.wg.Add(1) - w.ch <- workUnit{ - wg: w.wg, - fn: func() { - err := fn() - if w.err == nil { - // TODO: have the work queue accumulate errors and release them with Done() - glog.Errorf("Worker error: %v", err) - return - } - w.err <- err - }, - } -} diff --git a/pkg/oc/cli/image/append/workqueue.go 
b/pkg/oc/cli/image/workqueue/workqueue.go similarity index 91% rename from pkg/oc/cli/image/append/workqueue.go rename to pkg/oc/cli/image/workqueue/workqueue.go index fb57f1a746d4..7b84cef79d6f 100644 --- a/pkg/oc/cli/image/append/workqueue.go +++ b/pkg/oc/cli/image/workqueue/workqueue.go @@ -1,4 +1,4 @@ -package append +package workqueue import ( "sync" @@ -6,12 +6,27 @@ import ( "github.com/golang/glog" ) +type Work interface { + Parallel(fn func()) +} + +type Try interface { + Try(fn func() error) +} + +type Interface interface { + Batch(func(Work)) + Try(func(Try)) error + Queue(func(Work)) + Done() +} + type workQueue struct { ch chan workUnit wg *sync.WaitGroup } -func newWorkQueue(workers int, stopCh <-chan struct{}) *workQueue { +func New(workers int, stopCh <-chan struct{}) Interface { q := &workQueue{ ch: make(chan workUnit, 100), wg: &sync.WaitGroup{}, @@ -77,14 +92,6 @@ type workUnit struct { wg *sync.WaitGroup } -type Work interface { - Parallel(fn func()) -} - -type Try interface { - Try(fn func() error) -} - type worker struct { wg *sync.WaitGroup ch chan workUnit diff --git a/test/extended/images/extract.go b/test/extended/images/extract.go new file mode 100644 index 000000000000..fbf7af9cdea5 --- /dev/null +++ b/test/extended/images/extract.go @@ -0,0 +1,76 @@ +package images + +import ( + "github.com/MakeNowJust/heredoc" + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + + kapi "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + imageapi "github.com/openshift/api/image/v1" + imageclientset "github.com/openshift/client-go/image/clientset/versioned" + exutil "github.com/openshift/origin/test/extended/util" +) + +var _ = g.Describe("[Feature:ImageExtract] Image extract", func() { + defer g.GinkgoRecover() + + var oc *exutil.CLI + var ns string + + g.AfterEach(func() { + if g.CurrentGinkgoTestDescription().Failed && 
len(ns) > 0 { + exutil.DumpPodLogsStartingWithInNamespace("", ns, oc) + } + }) + + oc = exutil.NewCLI("image-extract", exutil.KubeConfigPath()) + + g.It("should extract content from an image", func() { + ns = oc.Namespace() + cli := oc.KubeFramework().PodClient() + client := imageclientset.NewForConfigOrDie(oc.UserConfig()).Image() + + _, err := client.ImageStreamImports(ns).Create(&imageapi.ImageStreamImport{ + ObjectMeta: metav1.ObjectMeta{ + Name: "1", + }, + Spec: imageapi.ImageStreamImportSpec{ + Import: true, + Images: []imageapi.ImageImportSpec{ + { + From: kapi.ObjectReference{Kind: "DockerImage", Name: "busybox:latest"}, + To: &kapi.LocalObjectReference{Name: "busybox"}, + }, + { + From: kapi.ObjectReference{Kind: "DockerImage", Name: "mysql:latest"}, + To: &kapi.LocalObjectReference{Name: "mysql"}, + }, + }, + }, + }) + o.Expect(err).ToNot(o.HaveOccurred()) + + // busyboxLayers := isi.Status.Images[0].Image.DockerImageLayers + // busyboxLen := len(busyboxLayers) + // mysqlLayers := isi.Status.Images[1].Image.DockerImageLayers + // mysqlLen := len(mysqlLayers) + + pod := cli.Create(cliPodWithPullSecret(oc, heredoc.Docf(` + set -x + + # command exits if directory doesn't exist + ! oc image extract --insecure docker-registry.default.svc:5000/%[1]s/1:busybox --path=/:/tmp/doesnotexist + + # extract busybox to a directory, verify the contents + mkdir -p /tmp/test + oc image extract --insecure docker-registry.default.svc:5000/%[1]s/1:busybox --path=/:/tmp/test + [ -d /tmp/test/etc ] && [ -d /tmp/test/bin ] + [ -f /tmp/test/bin/ls ] && /tmp/test/bin/ls /tmp/test + oc image extract --insecure docker-registry.default.svc:5000/%[1]s/1:busybox --path=/etc/shadow:/tmp --path=/etc/localtime:/tmp + [ -f /tmp/shadow ] && [ -f /tmp/localtime ] + `, ns))) + cli.WaitForSuccess(pod.Name, podStartupTimeout) + }) +})