import json
import os
import random
import shutil
import subprocess
from subprocess import DEVNULL

# anchore_utils is provided by the surrounding anchore package


def unpack(self, docleanup=True, destdir=None):
    # create the work dir
    if destdir:
        imagedir = destdir + "/" + str(random.randint(0, 9999999)) + ".anchoretmp"
    else:
        imagedir = self.tmpdir

    imageId = self.meta['imageId']
    imagetar = imagedir + "/image.tar"
    self.docleanup = docleanup

    if not os.path.exists(imagedir):
        os.makedirs(imagedir)

    # pull the image from docker and store/untar the tar
    if not os.path.exists(imagetar):
        r = self.docker_cli.get_image(imageId)
        chunk_size = 1024 * 100000
        # write in binary mode: the docker API returns raw tar bytes
        with open(imagetar, 'wb') as OFH:
            chunk = r.read(chunk_size)
            while chunk:
                OFH.write(chunk)
                chunk = r.read(chunk_size)

        # with open(imagetar, 'w') as OFH:
        #     OFH.write(self.docker_cli.get_image(imageId).data)

        sout = subprocess.check_output(["tar", "-C", imagedir, "-x", "-f", imagetar], stderr=DEVNULL)

    # store some metadata and dockerfile if present
    self.meta['sizebytes'] = str(os.path.getsize(imagetar))
    if self.dockerfile_contents:
        anchore_utils.update_file_str(self.dockerfile_contents, os.path.join(imagedir, "Dockerfile"), backup=False)
    if self.docker_data:
        anchore_utils.update_file_str(json.dumps(self.docker_data), os.path.join(imagedir, "docker_inspect.json"), backup=False)
    if self.docker_history:
        anchore_utils.update_file_str(json.dumps(self.docker_history), os.path.join(imagedir, "docker_history.json"), backup=False)

    # cleanup
    os.remove(imagetar)

    # squash the image layers into unpacked rootfs
    self.squash(imagedir)

    return imagedir
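# The chunked read/write loop above streams the image tarball to disk instead
# of buffering the whole image in memory. A minimal standalone sketch of the
# same pattern with the modern docker SDK for Python follows; the client, the
# image tag, and the output path here are assumptions for illustration, not
# part of the anchore code.
def _example_stream_image_tarball(tag="ubuntu:latest", outfile="/tmp/image.tar"):
    import docker

    client = docker.from_env()
    image = client.images.get(tag)
    with open(outfile, "wb") as OFH:
        # Image.save() yields the "docker save" tar stream in chunks
        for chunk in image.save(chunk_size=1024 * 1024):
            OFH.write(chunk)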
# a later variant of unpack(): adds a docker liveness check and a
# sha256-prefixed ID fallback, and disables the export-from-docker path
def unpack(self, docleanup=True, destdir=None):
    # need docker to be up and running for this
    if not self.docker_cli:
        raise Exception("docker cli is not initialized - docker needs to be up and running before containers can be analyzed.")

    # create the work dir
    if destdir:
        imagedir = destdir + "/" + str(random.randint(0, 9999999)) + ".anchoretmp"
    else:
        imagedir = self.tmpdir

    imageId = self.meta['imageId']
    imagetar = imagedir + "/image.tar"
    self.docleanup = docleanup

    if not os.path.exists(imagedir):
        os.makedirs(imagedir)

    if False:
        # pull the image from docker and store/untar the tar
        if not os.path.exists(imagetar):
            try:
                r = self.docker_cli.get_image(imageId)
            except Exception:
                try:
                    # some docker API versions want the digest-style ID
                    r = self.docker_cli.get_image("sha256:" + imageId)
                except Exception:
                    raise

            chunk_size = 1024 * 100000
            with open(imagetar, 'wb') as OFH:
                chunk = r.read(chunk_size)
                while chunk:
                    OFH.write(chunk)
                    chunk = r.read(chunk_size)

            sout = subprocess.check_output(["tar", "-C", imagedir, "-x", "-f", imagetar], stderr=DEVNULL)

        # store some metadata and dockerfile if present
        self.meta['sizebytes'] = str(os.path.getsize(imagetar))

    if self.dockerfile_contents:
        anchore_utils.update_file_str(self.dockerfile_contents, os.path.join(imagedir, "Dockerfile"), backup=False)
    if self.docker_data:
        anchore_utils.update_file_str(json.dumps(self.docker_data), os.path.join(imagedir, "docker_inspect.json"), backup=False)
    if self.docker_history:
        anchore_utils.update_file_str(json.dumps(self.docker_history), os.path.join(imagedir, "docker_history.json"), backup=False)

    # cleanup
    if os.path.exists(imagetar):
        os.remove(imagetar)

    # squash the image layers into unpacked rootfs
    rc = self.squash(imagedir)
    if not rc:
        self._logger.error("image squash operation failed")
        return False

    # if self.squashtar and os.path.exists(self.squashtar):
    #     self.meta['sizebytes'] = str(os.path.getsize(self.squashtar))

    return imagedir
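# The disabled path above shells out to tar(1). A sketch of the equivalent
# extraction with the standard-library tarfile module follows; the paths are
# assumptions for illustration.
def _example_extract_image_tar(imagetar="/tmp/image.tar", imagedir="/tmp/imagedir"):
    import tarfile

    # note: only extract archives from trusted sources -- extractall()
    # follows member paths exactly as written in the archive
    with tarfile.open(imagetar) as tf:
        tf.extractall(path=imagedir)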
def save_image(self):
    # Dockerfile handling
    if self.dockerfile_contents:
        if self.dockerfile_mode == 'Guessed':
            anchore_utils.update_file_str(self.dockerfile_contents, self.anchore_imagedir + "/Dockerfile.guessed", backup=False)
        elif self.dockerfile_mode == 'Actual':
            anchore_utils.update_file_str(self.dockerfile_contents, self.anchore_imagedir + "/Dockerfile", backup=False)
            # an actual Dockerfile supersedes any previously guessed one
            if os.path.exists(self.anchore_imagedir + "/Dockerfile.guessed"):
                os.remove(self.anchore_imagedir + "/Dockerfile.guessed")

    # Image output dir populate
    imageoutputdir = self.anchore_imagedir + "/image_output/image_info"
    if not os.path.exists(imageoutputdir):
        os.makedirs(imageoutputdir)

    anchore_utils.write_kvfile_fromdict(imageoutputdir + "/image.meta", self.meta)

    level = 0
    tagdict = {}
    for t in self.anchore_current_tags:
        tagdict[t] = str(level)
        level = level + 1
    anchore_utils.write_kvfile_fromdict(imageoutputdir + "/image_current.tags", tagdict)

    level = 0
    tagdict = {}
    for t in self.anchore_all_tags:
        tagdict[t] = str(level)
        level = level + 1
    anchore_utils.write_kvfile_fromdict(imageoutputdir + "/image_all.tags", tagdict)

    dfile = self.get_dockerfile()
    if dfile:
        shutil.copy(dfile, imageoutputdir + "/Dockerfile")

    if not os.path.exists(self.anchore_imagedir + "/image_output/image_familytree/"):
        os.makedirs(self.anchore_imagedir + "/image_output/image_familytree/")

    level = 0
    ldict = {}
    for fid in self.get_layers():
        ldict[fid] = str(level)
        level = level + 1
    anchore_utils.write_kvfile_fromdict(self.anchore_imagedir + "/image_output/image_familytree/layers", ldict)

    level = 0
    ldict = {}
    for fid in self.get_familytree():
        ldict[fid] = str(level)
        src = '/'.join([self.anchore_image_datadir, fid])
        dst = '/'.join([self.anchore_imagedir, "/image_output/image_familytree/", fid])
        try:
            os.remove(dst)
        except:
            pass
        os.symlink(src, dst)
        level = level + 1

        if self.get_earliest_base() == fid:
            src = '/'.join([self.anchore_image_datadir, fid])
            dst = '/'.join([self.anchore_imagedir, "/image_output/image_familytree/base"])
            try:
                os.remove(dst)
            except:
                pass
            os.symlink(src, dst)
    anchore_utils.write_kvfile_fromdict(self.anchore_imagedir + "/image_output/image_familytree/familytree", ldict)

    # generate and save image report
    report = self.generate_image_report()
    self.anchore_db.save_image_report(self.meta['imageId'], report)
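# save_image() repeatedly builds {item: str(position)} dicts to record tag and
# layer ordering before persisting them with write_kvfile_fromdict. A sketch of
# the same pattern written with enumerate follows; the input list is an
# assumption for illustration.
def _example_ordered_kvdict(tags=("myrepo/app:latest", "myrepo/app:1.0")):
    # positions are stored as strings so the key/value file stays text-only
    tagdict = {tag: str(level) for level, tag in enumerate(tags)}
    return tagdict  # e.g. {'myrepo/app:latest': '0', 'myrepo/app:1.0': '1'}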