def pull(cli):
    """Pull the image named by cli.image_ref and flatten it into storage
    (or into cli.image_dir, if given)."""
    ch.dependencies_check()
    # Decide where everything goes.
    dlcache = cli.storage + "/dlcache"
    if cli.image_dir is None:
        unpack_dir = cli.storage + "/img"
        image_subdir = None   # infer from image ref
    else:
        unpack_dir = cli.image_dir
        image_subdir = ""
    # Set things up.
    ref = ch.Image_Ref(cli.image_ref)
    if cli.parse_only:
        print(ref.as_verbose_str)
        sys.exit(0)
    image = ch.Image(ref, dlcache, unpack_dir, image_subdir)
    ch.INFO("pulling image: %s" % image.ref)
    if cli.image_dir is None:
        ch.DEBUG("destination: %s" % image.unpack_path)
    else:
        ch.INFO("destination: %s" % image.unpack_path)
    ch.DEBUG("use cache: %s" % (not cli.no_cache))
    ch.DEBUG("download cache: %s" % image.download_cache)
    ch.DEBUG("manifest: %s" % image.manifest_path)
    # Pull!
    image.pull_to_unpacked(use_cache=(not cli.no_cache),
                           last_layer=cli.last_layer)
    # Done.
    ch.INFO("done")
def init_maybe(self, img_path, args, env):
    """Run the workaround init steps in image img_path, if needed.

    args is the instruction's argument list (used only to decide whether
    injection applies) and env is the environment for the commands. For
    each (test_cmd, init_cmd) pair in self.init, run test_cmd; if it
    fails and --force is active, run init_cmd. No-op if the instruction
    doesn't need injection or init already ran.

    Fixes: the first log line used %s for the step number while the rest
    used %d (now consistent), and the args parameter was clobbered to
    hold the shell command (now a separate local)."""
    if (not self.needs_inject(args)):
        ch.VERBOSE("workarounds: init: instruction doesn't need injection")
        return
    if (self.init_done):
        ch.VERBOSE("workarounds: init: already initialized")
        return
    for (i, (test_cmd, init_cmd)) in enumerate(self.init, 1):
        ch.INFO("workarounds: init step %d: checking: $ %s" % (i, test_cmd))
        test_args = ["/bin/sh", "-c", test_cmd]
        exit_code = ch.ch_run_modify(img_path, test_args, env, fail_ok=True)
        if (exit_code == 0):
            ch.INFO("workarounds: init step %d: exit code %d, step not needed"
                    % (i, exit_code))
        elif (not self.inject_p):
            ch.INFO("workarounds: init step %d: no --force, skipping" % i)
        else:
            ch.INFO("workarounds: init step %d: $ %s" % (i, init_cmd))
            init_args = ["/bin/sh", "-c", init_cmd]
            ch.ch_run_modify(img_path, init_args, env)
    self.init_done = True
def upload(self):
    """Push the prepared layers, config, and manifest to self.dst_ref."""
    ch.INFO("starting upload")
    ul = ch.Registry_HTTP(self.dst_ref)
    layer_ct = len(self.layers)
    for (i, (digest, tarball)) in enumerate(self.layers, start=1):
        note = "layer %d/%d: " % (i, layer_ct)
        ul.layer_from_file(digest, tarball, note)
    ul.config_upload(self.config)
    ch.INFO("manifest: uploading")
    ul.manifest_upload(self.manifest)
    ul.close()
def fatman_load(self):
    """Load the fat manifest JSON file, downloading it first if needed. If
       the image has a fat manifest, populate self.architectures; this may
       be an empty dictionary if no valid architectures were found.

       Raises:

         * Not_In_Registry_Error if the image does not exist.

         * No_Fatman_Error if the image exists but has no fat manifest,
           i.e., is architecture-unaware. In this case self.architectures
           is set to None."""
    self.architectures = None
    if (str(self.image.ref) in manifests_internal):
        # cheat; internal manifest library matches every architecture
        self.architectures = {ch.arch_host: None}
        return
    if (os.path.exists(self.fatman_path) and self.use_cache):
        ch.INFO("manifest list: using existing file")
    else:
        # raises Not_In_Registry_Error if needed
        self.registry.fatman_to_file(self.fatman_path,
                                     "manifest list: downloading")
    fm = ch.json_from_file(self.fatman_path, "fat manifest")
    if ("layers" in fm or "fsLayers" in fm):
        # We got a skinny (image) manifest rather than a fat one, i.e. the
        # image is architecture-unaware.
        # FIXME (issue #1101): If it's a v2 manifest we could use it instead
        # of re-requesting later. Maybe we could here move/copy it over to
        # the skinny manifest path.
        raise ch.No_Fatman_Error()
    if ("errors" in fm):
        # fm is an error blob.
        (code, msg) = self.error_decode(fm)
        if (code == "MANIFEST_UNKNOWN"):
            ch.INFO("manifest list: no such image")
            return
        else:
            ch.FATAL("manifest list: error: %s" % msg)
    self.architectures = dict()
    if ("manifests" not in fm):
        ch.FATAL("manifest list has no key 'manifests'")
    for m in fm["manifests"]:
        try:
            if (m["platform"]["os"] != "linux"):
                continue  # only Linux images are usable
            arch = m["platform"]["architecture"]
            if ("variant" in m["platform"]):
                # e.g. "arm" + "v7" -> "arm/v7"
                arch = "%s/%s" % (arch, m["platform"]["variant"])
            digest = m["digest"]
        except KeyError:
            ch.FATAL("manifest lists missing a required key")
        if (arch in self.architectures):
            ch.FATAL("manifest list: duplicate architecture: %s" % arch)
        self.architectures[arch] = ch.digest_trim(digest)
    if (len(self.architectures) == 0):
        ch.WARNING("no valid architectures found")
def inject_first(img, env):
    """Run the fakeroot config's "first" commands in image img (if a config
    matches and the image hasn't already been initialized). env is the
    environment for the commands.

    Bug fix: the first-run marker path was tested as the literal string
    "%s/ch/fakeroot-first-run" without substituting img, so the
    "already initialized" short-circuit could never fire."""
    c = config(img)
    if (c is None):
        return
    if (os.path.exists("%s/ch/fakeroot-first-run" % img)):
        ch.DEBUG("fakeroot: already initialized")
        return
    ch.INFO("fakeroot: initializing for %s" % c["name"])
    for cl in c["first"]:
        ch.INFO("fakeroot: $ %s" % cl)
        args = ["/bin/sh", "-c", cl]
        ch.ch_run_modify(img, args, env)
def inject_run(self, args):
    """Return the RUN argument list args, with the workaround commands
    prepended when injection is both needed and enabled; otherwise
    return args unchanged. Increments self.inject_ct on injection."""
    if (not self.needs_inject(args)):
        ch.VERBOSE("workarounds: RUN: instruction doesn't need injection")
        return args
    assert (self.init_done)
    if (not self.inject_p):
        # Injection would help here, but --force wasn't given.
        ch.INFO("workarounds: RUN: available here with --force")
        return args
    new_args = self.each + args
    self.inject_ct += 1
    ch.INFO("workarounds: RUN: new command: %s" % new_args)
    return new_args
def fatman_load(self):
    """Load the fat manifest JSON file, downloading it first if needed. If
       the image has a fat manifest, populate self.architectures; this may
       be an empty dictionary if no valid architectures were found.

       It is not an error if the image has no fat manifest or the registry
       reports no such image. In this architecture-unaware condition, set
       self.architectures to None."""
    self.architectures = None
    if (str(self.image.ref) in manifests_internal):
        return  # no fat manifests for internal library
    if (os.path.exists(self.fatman_path) and self.use_cache):
        ch.INFO("manifest list: using existing file")
    else:
        ch.INFO("manifest list: downloading")
        self.registry.fatman_to_file(self.fatman_path, True)
    if (not os.path.exists(self.fatman_path)):
        # Response was 404 (or equivalent).
        ch.INFO("manifest list: no list found")
        return
    fm = ch.json_from_file(self.fatman_path, "fat manifest")
    if ("layers" in fm or "fsLayers" in fm):
        # If there is no fat manifest but the image exists, we get a skinny
        # manifest instead. We can't use it, however, because it might be a
        # v1 manifest when a v2 is available. ¯\_(ツ)_/¯
        ch.INFO("manifest list: no valid list found")
        return
    if ("errors" in fm):
        # fm is an error blob.
        (code, msg) = self.error_decode(fm)
        if (code == "MANIFEST_UNKNOWN"):
            ch.INFO("manifest list: no such image")
            return
        else:
            ch.FATAL("manifest list: error: %s" % msg)
    self.architectures = dict()
    if ("manifests" not in fm):
        ch.FATAL("manifest list has no key 'manifests'")
    for m in fm["manifests"]:
        try:
            if (m["platform"]["os"] != "linux"):
                continue  # only Linux images are usable
            arch = m["platform"]["architecture"]
            if ("variant" in m["platform"]):
                # e.g. "arm" + "v7" -> "arm/v7"
                arch = "%s/%s" % (arch, m["platform"]["variant"])
            digest = m["digest"]
        except KeyError:
            ch.FATAL("manifest lists missing a required key")
        if (arch in self.architectures):
            ch.FATAL("manifest list: duplicate architecture: %s" % arch)
        self.architectures[arch] = ch.digest_trim(digest)
    if (len(self.architectures) == 0):
        ch.WARNING("no valid architectures found")
def import_(cli):
    """Import the directory or tarball at cli.path into storage as the
    image named by cli.image_ref."""
    if not os.path.exists(cli.path):
        ch.FATAL("can't copy: not found: %s" % cli.path)
    dst = ch.Image(ch.Image_Ref(cli.image_ref))
    ch.INFO("importing: %s" % cli.path)
    ch.INFO("destination: %s" % dst)
    if os.path.isdir(cli.path):
        dst.copy_unpacked(cli.path)
    else:
        dst.unpack([cli.path])  # tarball, hopefully
    # Initialize metadata if the imported image didn't bring any.
    dst.metadata_load()
    dst.metadata_save()
    ch.done_notify()
def list_(cli):
    """List all images in storage, or print details of the single image
    named by cli.image_ref.

    Fix: the ossafe error message claimed we were listing the storage
    root, but the directory actually listed is imgdir (the unpack base);
    the message now names the right directory."""
    ch.dependencies_check()
    imgdir = ch.storage.unpack_base
    if (cli.image_ref is None):
        # list all images
        if (not os.path.isdir(ch.storage.root)):
            ch.INFO("does not exist: %s" % ch.storage.root)
            return
        if (not ch.storage.valid_p()):
            ch.INFO("not a storage directory: %s" % ch.storage.root)
            return
        imgs = ch.ossafe(os.listdir,
                         "can't list directory: %s" % imgdir, imgdir)
        for img in sorted(imgs):
            print(ch.Image_Ref(img))
    else:
        # list specified image
        img = ch.Image(ch.Image_Ref(cli.image_ref))
        print("details of image: %s" % img.ref)
        # present locally?
        if (not img.unpack_exist_p):
            stored = "no"
        else:
            img.metadata_load()
            stored = "yes (%s)" % img.metadata["arch"]
        print("in local storage: %s" % stored)
        # present remotely?
        print("full remote ref: %s" % img.ref.canonical)
        pullet = pull.Image_Puller(img, not cli.no_cache)
        pullet.fatman_load()
        if (pullet.architectures is not None):
            remote = "yes"
            arch_aware = "yes"
            arch_avail = " ".join(sorted(pullet.architectures.keys()))
        else:
            # No fat manifest; fall back to a skinny manifest probe.
            pullet.manifest_load(True)
            if (pullet.layer_hashes is not None):
                remote = "yes"
                arch_aware = "no"
                arch_avail = "unknown"
            else:
                remote = "no"
                arch_aware = "n/a"
                arch_avail = "n/a"
        pullet.done()
        print("available remotely: %s" % remote)
        print("remote arch-aware: %s" % arch_aware)
        print("host architecture: %s" % ch.arch_host)
        print("archs available: %s" % arch_avail)
def detect(image, force, no_force_detect):
    """Return a Fakeroot object for image, or a Fakeroot_Noop stand-in if
    no configuration matches or detection is disabled. force and
    no_force_detect mirror the corresponding command-line options."""
    found = None
    if (no_force_detect):
        ch.VERBOSE("not detecting --force config, per --no-force-detect")
    else:
        # Try each known config until one matches.
        for (tag, cfg) in DEFAULT_CONFIGS.items():
            try:
                found = Fakeroot(image, tag, cfg, force)
                break
            except Config_Aint_Matched:
                pass
    # Report findings, then fall back to a no-op if nothing matched.
    if (found is None):
        msg = "--force not available (no suitable config found)"
        if (force):
            ch.WARNING(msg)
        else:
            ch.VERBOSE(msg)
        return Fakeroot_Noop()
    verb = "will use" if force else "available"
    ch.INFO("%s --force: %s: %s" % (verb, found.tag, found.name))
    return found
def download(self): "Download image metadata and layers and put them in the download cache." # Spec: https://docs.docker.com/registry/spec/manifest-v2-2/ ch.VERBOSE("downloading image: %s" % self.image) try: # fat manifest if (ch.arch != "yolo"): try: self.fatman_load() if (ch.arch not in self.architectures): ch.FATAL("requested arch unavailable: %s" % ch.arch, ("available: %s" % " ".join(sorted(self.architectures.keys())))) except ch.No_Fatman_Error: if (ch.arch == "amd64"): # We're guessing that enough arch-unaware images are amd64 to # barge ahead if requested architecture is amd64. ch.arch = "yolo" ch.WARNING("image is architecture-unaware") ch.WARNING( "requested arch is amd64; using --arch=yolo") else: ch.FATAL("image is architecture-unaware", "consider --arch=yolo") # manifest self.manifest_load() except ch.Not_In_Registry_Error: ch.FATAL("not in registry: %s" % self.registry.ref) # config ch.VERBOSE("config path: %s" % self.config_path) if (self.config_path is not None): if (os.path.exists(self.config_path) and self.use_cache): ch.INFO("config: using existing file") else: self.registry.blob_to_file(self.config_hash, self.config_path, "config: downloading") # layers for (i, lh) in enumerate(self.layer_hashes, start=1): path = self.layer_path(lh) ch.VERBOSE("layer path: %s" % path) msg = "layer %d/%d: %s" % (i, len(self.layer_hashes), lh[:7]) if (os.path.exists(path) and self.use_cache): ch.INFO("%s: using existing file" % msg) else: self.registry.blob_to_file(lh, path, "%s: downloading" % msg) # done self.registry.close()
def main(cli):
    """Entry point: pull the image named by cli.image_ref and flatten it."""
    ref = ch.Image_Ref(cli.image_ref)
    if (cli.parse_only):
        print(ref.as_verbose_str)
        sys.exit(0)
    image = ch.Image(ref, cli.image_dir)
    ch.INFO("pulling image: %s" % ref)
    ch.INFO("requesting arch: %s" % ch.arch)
    if (cli.image_dir is None):
        ch.VERBOSE("destination: %s" % image.unpack_path)
    else:
        ch.INFO("destination: %s" % image.unpack_path)
    pullet = Image_Puller(image, not cli.no_cache)
    pullet.pull_to_unpacked(cli.last_layer)
    pullet.done()
    ch.done_notify()
def prepare(self):
    """Prepare self.image for pushing to self.dst_ref: write layer
    tarballs to the upload cache, gzip them, and build the config and
    manifest blobs. Stores results in self.layers (list of (hash, path)
    pairs), self.config, and self.manifest (both bytes).

    There is not currently any support for re-using any previously
    prepared files already in the upload cache, because we don't yet
    have a way to know if these have changed until they are already
    built.

    Cleanup: removed the unused local tar_c and a dead commented-out
    size_uc line."""
    ch.mkdirs(ch.storage.upload_cache)
    tars_uc = self.image.tarballs_write(ch.storage.upload_cache)
    tars_c = list()
    config = self.config_new()
    manifest = self.manifest_new()
    # Prepare layers.
    for (i, tar_uc) in enumerate(tars_uc, start=1):
        ch.INFO("layer %d/%d: preparing" % (i, len(tars_uc)))
        path_uc = ch.storage.upload_cache // tar_uc
        hash_uc = ch.file_hash(path_uc)
        config["rootfs"]["diff_ids"].append("sha256:" + hash_uc)
        path_c = ch.file_gzip(path_uc, ["-9", "--no-name"])
        hash_c = ch.file_hash(path_c)
        size_c = ch.file_size(path_c)
        tars_c.append((hash_c, path_c))
        manifest["layers"].append({ "mediaType": ch.TYPE_LAYER,
                                    "size": size_c,
                                    "digest": "sha256:" + hash_c })
    # Prepare metadata.
    ch.INFO("preparing metadata")
    config_bytes = json.dumps(config, indent=2).encode("UTF-8")
    config_hash = ch.bytes_hash(config_bytes)
    manifest["config"]["size"] = len(config_bytes)
    manifest["config"]["digest"] = "sha256:" + config_hash
    ch.DEBUG("config: %s\n%s" % (config_hash, config_bytes.decode("UTF-8")))
    manifest_bytes = json.dumps(manifest, indent=2).encode("UTF-8")
    ch.DEBUG("manifest:\n%s" % manifest_bytes.decode("UTF-8"))
    # Store for the next steps.
    self.layers = tars_c
    self.config = config_bytes
    self.manifest = manifest_bytes
def download(self): "Download image metadata and layers and put them in the download cache." # Spec: https://docs.docker.com/registry/spec/manifest-v2-2/ ch.VERBOSE("downloading image: %s" % self.image) ch.mkdirs(ch.storage.download_cache) # fat manifest if (ch.arch != "yolo"): self.fatman_load() if (self.architectures is not None): if (ch.arch not in self.architectures): ch.FATAL( "requested arch unavailable: %s not one of: %s" % (ch.arch, " ".join(sorted(self.architectures.keys())))) elif (ch.arch == "amd64"): # We're guessing that enough arch-unaware images are amd64 to # barge ahead if requested architecture is amd64. ch.arch = "yolo" ch.WARNING("image is architecture-unaware") ch.WARNING("requested arch is amd64; switching to --arch=yolo") else: ch.FATAL("image is architecture-unaware; try --arch=yolo (?)") # manifest self.manifest_load() # config ch.VERBOSE("config path: %s" % self.config_path) if (self.config_path is not None): if (os.path.exists(self.config_path) and self.use_cache): ch.INFO("config: using existing file") else: ch.INFO("config: downloading") self.registry.blob_to_file(self.config_hash, self.config_path) # layers for (i, lh) in enumerate(self.layer_hashes, start=1): path = self.layer_path(lh) ch.VERBOSE("layer path: %s" % path) ch.INFO("layer %d/%d: %s: " % (i, len(self.layer_hashes), lh[:7]), end="") if (os.path.exists(path) and self.use_cache): ch.INFO("using existing file") else: ch.INFO("downloading") self.registry.blob_to_file(lh, path)
def main(cli):
    """Entry point: pull the image named by cli.image_ref and flatten it
    into storage (or into cli.image_dir, if given)."""
    ch.dependencies_check()
    # Set things up.
    ref = ch.Image_Ref(cli.image_ref)
    if (cli.parse_only):
        ch.INFO(ref.as_verbose_str)
        sys.exit(0)
    image = ch.Image(ref, cli.image_dir)
    ch.INFO("pulling image: %s" % ref)
    if (cli.image_dir is None):
        ch.VERBOSE("destination: %s" % image.unpack_path)
    else:
        ch.INFO("destination: %s" % image.unpack_path)
    use_cache = not cli.no_cache
    ch.VERBOSE("use cache: %s" % use_cache)
    ch.VERBOSE("download cache: %s" % ch.storage.download_cache)
    pullet = Image_Puller(image)
    ch.VERBOSE("manifest: %s" % pullet.manifest_path)
    pullet.pull_to_unpacked(use_cache=use_cache, last_layer=cli.last_layer)
    ch.done_notify()
def main(cli):
    """Entry point: push the image named by cli.source_ref (optionally at
    path cli.image) to the registry, as cli.dest_ref if given."""
    src_ref = ch.Image_Ref(cli.source_ref)
    ch.INFO("pushing image: %s" % src_ref)
    image = ch.Image(src_ref, cli.image)
    # FIXME: validate it's an image using Megan's new function (PR #908)
    if (not os.path.isdir(image.unpack_path)):
        if (cli.image is None):
            ch.FATAL("can't push: no image %s" % src_ref)
        else:
            ch.FATAL("can't push: %s does not appear to be an image"
                     % cli.image)
    if (cli.image is None):
        ch.VERBOSE("image path: %s" % image.unpack_path)
    else:
        ch.INFO("image path: %s" % image.unpack_path)
    if (cli.dest_ref is None):
        dst_ref = ch.Image_Ref(cli.source_ref)
    else:
        dst_ref = ch.Image_Ref(cli.dest_ref)
        ch.INFO("destination: %s" % dst_ref)
    up = Image_Pusher(image, dst_ref)
    up.push()
    ch.done_notify()
def pull_to_unpacked(self, last_layer=None):
    """Pull and flatten image. last_layer, if given, limits how many
    layers are unpacked. After unpacking, log the image's architecture
    and warn if it doesn't match the requested one.

    Bug fix: the final WARNING referenced the undefined name image_arch
    (the variable is arch_image), raising NameError on every
    architecture mismatch instead of warning."""
    self.download()
    layer_paths = [self.layer_path(h) for h in self.layer_hashes]
    self.image.unpack(layer_paths, last_layer)
    self.image.metadata_replace(self.config_path)
    # Check architecture we got. This is limited because image metadata does
    # not store the variant. Move fast and break things, I guess.
    arch_image = self.image.metadata["arch"] or "unknown"
    arch_short = ch.arch.split("/")[0]
    arch_host_short = ch.arch_host.split("/")[0]
    if (arch_image != "unknown" and arch_image != arch_host_short):
        host_mismatch = " (may not match host %s)" % ch.arch_host
    else:
        host_mismatch = ""
    ch.INFO("image arch: %s%s" % (arch_image, host_mismatch))
    if (ch.arch != "yolo" and arch_short != arch_image):
        ch.WARNING("image architecture does not match requested: %s ≠ %s"
                   % (ch.arch, arch_image))
def download(self, use_cache):
    """Download image metadata and layers into the download cache.

    If use_cache is true, anything already in the cache is skipped;
    otherwise everything is downloaded anyway, overwriting what's in
    the cache."""
    # Spec: https://docs.docker.com/registry/spec/manifest-v2-2/
    dl = ch.Registry_HTTP(self.image.ref)
    ch.VERBOSE("downloading image: %s" % dl.ref)
    ch.mkdirs(ch.storage.download_cache)
    # Manifest.
    if (use_cache and os.path.exists(self.manifest_path)):
        ch.INFO("manifest: using existing file")
    else:
        ch.INFO("manifest: downloading")
        dl.manifest_to_file(self.manifest_path)
    self.manifest_load()
    # Config (may be absent).
    ch.VERBOSE("config path: %s" % self.config_path)
    if (self.config_path is not None):
        if (use_cache and os.path.exists(self.config_path)):
            ch.INFO("config: using existing file")
        else:
            ch.INFO("config: downloading")
            dl.blob_to_file(self.config_hash, self.config_path)
    # Layers.
    layer_ct = len(self.layer_hashes)
    for (i, lh) in enumerate(self.layer_hashes, start=1):
        path = self.layer_path(lh)
        ch.VERBOSE("layer path: %s" % path)
        ch.INFO("layer %d/%d: %s: " % (i, layer_ct, lh[:7]), end="")
        if (use_cache and os.path.exists(path)):
            ch.INFO("using existing file")
        else:
            ch.INFO("downloading")
            dl.blob_to_file(lh, path)
    dl.close()
def main(cli_):
    # CLI namespace. :P
    global cli
    cli = cli_
    # Infer input file if needed.
    if (cli.file is None):
        cli.file = cli.context + "/Dockerfile"
    # Infer image name if needed: prefer the Dockerfile extension, then the
    # containing directory name.
    if (cli.tag is None):
        m = re.search(r"(([^/]+)/)?Dockerfile(\.(.+))?$",
                      os.path.abspath(cli.file))
        if (m is not None):
            if m.group(4):    # extension
                cli.tag = m.group(4)
            elif m.group(2):  # containing directory
                cli.tag = m.group(2)
    # Deal with build arguments.
    def build_arg_get(arg):
        # Return (key, value); value falls back to the environment when the
        # argument has no "=value" part.
        kv = arg.split("=")
        if (len(kv) == 2):
            return kv
        else:
            v = os.getenv(kv[0])
            if (v is None):
                ch.FATAL("--build-arg: %s: no value and not in environment"
                         % kv[0])
            return (kv[0], v)
    if (cli.build_arg is None):
        cli.build_arg = list()
    cli.build_arg = dict(build_arg_get(i) for i in cli.build_arg)
    # Finish CLI initialization.
    ch.DEBUG(cli)
    ch.dependencies_check()
    # Guess whether the context is a URL, and error out if so. This can be a
    # typical looking URL e.g. "https://..." or also something like
    # "[email protected]:...". The line noise in the second line of the regex is
    # to match this second form. Username and host characters from
    # https://tools.ietf.org/html/rfc3986.
    # NOTE(review): in the scheme alternative, "git+ssh" has an unescaped
    # "+", so it is parsed as "gi", "t"+, "ssh" rather than the literal
    # scheme "git+ssh" — confirm whether "git+ssh://" contexts are caught.
    if (re.search(r"""  ^((git|git+ssh|http|https|ssh)://
                      | ^[\w.~%!$&'\(\)\*\+,;=-]+@[\w.~%!$&'\(\)\*\+,;=-]+:)""",
                  cli.context, re.VERBOSE) is not None):
        ch.FATAL("not yet supported: issue #773: URL context: %s" % cli.context)
    if (os.path.exists(cli.context + "/.dockerignore")):
        ch.WARNING("not yet supported, ignored: issue #777: .dockerignore file")
    # Set up build environment.
    global env
    env = Environment()
    # Read input file.
    if (cli.file == "-"):
        text = ch.ossafe(sys.stdin.read, "can't read stdin")
    else:
        fp = ch.open_(cli.file, "rt")
        text = ch.ossafe(fp.read, "can't read: %s" % cli.file)
        fp.close()
    # Parse it.
    parser = lark.Lark("?start: dockerfile\n" + ch.GRAMMAR,
                       parser="earley", propagate_positions=True)
    # Avoid Lark issue #237: lark.exceptions.UnexpectedEOF if the file does
    # not end in newline.
    text += "\n"
    try:
        tree = parser.parse(text)
    except lark.exceptions.UnexpectedInput as x:
        ch.DEBUG(x)  # noise about what was expected in the grammar
        ch.FATAL("can't parse: %s:%d,%d\n\n%s"
                 % (cli.file, x.line, x.column, x.get_context(text, 39)))
    ch.DEBUG(tree.pretty())
    # Sometimes we exit after parsing.
    if (cli.parse_only):
        sys.exit(0)
    # Count the number of stages (i.e., FROM instructions)
    global image_ct
    image_ct = sum(1 for i in ch.tree_children(tree, "from_"))
    # Traverse the tree and do what it says.
    #
    # We don't actually care whether the tree is traversed breadth-first or
    # depth-first, but we *do* care that instruction nodes are visited in
    # order. Neither visit() nor visit_topdown() are documented as of
    # 2020-06-11 [1], but examining source code [2] shows that
    # visit_topdown() uses Tree.iter_trees_topdown(), which *is* documented
    # to be in-order [3].
    #
    # This change seems to have been made in 0.8.6 (see PR #761); before
    # then, visit() was in order. Therefore, we call that instead, if
    # visit_topdown() is not present, to improve compatibility (see issue
    # #792).
    #
    # [1]: https://lark-parser.readthedocs.io/en/latest/visitors/#visitors
    # [2]: https://github.com/lark-parser/lark/blob/445c8d4/lark/visitors.py#L211
    # [3]: https://lark-parser.readthedocs.io/en/latest/classes/#tree
    ml = Main_Loop()
    if (hasattr(ml, 'visit_topdown')):
        ml.visit_topdown(tree)
    else:
        ml.visit(tree)
    # Check that all build arguments were consumed.
    if (len(cli.build_arg) != 0):
        ch.FATAL("--build-arg: not consumed: "
                 + " ".join(cli.build_arg.keys()))
    # Print summary & we're done.
    if (ml.instruction_ct == 0):
        ch.FATAL("no instructions found: %s" % cli.file)
    assert (image_i + 1 == image_ct)  # should have errored already if not
    ch.INFO("grown in %d instructions: %s"
            % (ml.instruction_ct, images[image_i]))
def announce(self):
    # Log this object at INFO level (ch.INFO stringifies self).
    ch.INFO(self)
def manifest_load(self, continue_404=False):
    """Download the manifest file if needed, parse it, and set
       self.config_hash and self.layer_hashes. By default, if the image
       does not exist, exit with error; if continue_404, then log the
       condition but do not exit. In this case, self.config_hash and
       self.layer_hashes will both be None."""
    def bad_key(key):
        # Abort: manifest is missing a required key.
        ch.FATAL("manifest: %s: no key: %s" % (self.manifest_path, key))
    self.config_hash = None
    self.layer_hashes = None
    # obtain the manifest
    try:
        # internal manifest library, e.g. for "FROM scratch"
        manifest = manifests_internal[str(self.image.ref)]
        ch.INFO("manifest: using internal library")
    except KeyError:
        # download the file if needed, then parse it
        if (ch.arch == "yolo" or self.architectures is None):
            digest = None  # let the registry pick; no arch to select by
        else:
            digest = self.architectures[ch.arch]
        ch.DEBUG("manifest digest: %s" % digest)
        if (os.path.exists(self.manifest_path) and self.use_cache):
            ch.INFO("manifest: using existing file")
        else:
            ch.INFO("manifest: downloading")
            self.registry.manifest_to_file(self.manifest_path,
                                           digest=digest,
                                           continue_404=continue_404)
        if (not os.path.exists(self.manifest_path)):
            # response was 404 (or equivalent)
            ch.INFO("manifest: none found")
            return
        manifest = ch.json_from_file(self.manifest_path, "manifest")
    # validate schema version
    try:
        version = manifest['schemaVersion']
    except KeyError:
        bad_key("schemaVersion")
    if (version not in {1, 2}):
        ch.FATAL("unsupported manifest schema version: %s" % repr(version))
    # load config hash
    #
    # FIXME: Manifest version 1 does not list a config blob. It does have
    # things (plural) that look like a config at history/v1Compatibility as
    # an embedded JSON string :P but I haven't dug into it.
    if (version == 1):
        ch.VERBOSE("no config; manifest schema version 1")
        self.config_hash = None
    else:  # version == 2
        try:
            self.config_hash = manifest["config"]["digest"]
            if (self.config_hash is not None):
                self.config_hash = ch.digest_trim(self.config_hash)
        except KeyError:
            bad_key("config/digest")
    # load layer hashes (v1 and v2 use different key names)
    if (version == 1):
        key1 = "fsLayers"
        key2 = "blobSum"
    else:  # version == 2
        key1 = "layers"
        key2 = "digest"
    if (key1 not in manifest):
        bad_key(key1)
    self.layer_hashes = list()
    for i in manifest[key1]:
        if (key2 not in i):
            bad_key("%s/%s" % (key1, key2))
        self.layer_hashes.append(ch.digest_trim(i[key2]))
    if (version == 1):
        # v1 lists layers in the reverse order of v2; normalize.
        self.layer_hashes.reverse()
def cleanup(self):
    """Remove the temporary layer tarballs created for this push."""
    ch.INFO("cleaning up")
    # Delete the tarballs since we can't yet cache them.
    for (_, tarball) in self.layers:
        ch.VERBOSE("deleting tarball: %s" % tarball)
        ch.unlink(tarball)