def get_os_release():
    '''Return the PRETTY_NAME string from the base layer's os-release file.

    Given the base layer object, determine if an os-release file exists
    and return the PRETTY_NAME string from it (with surrounding quotes
    stripped). If no release file exists or the key is absent, return an
    empty string. Assume that the layer filesystem is mounted.'''
    # os-release may exist under /etc/ or /usr/lib. We should first check
    # for the preferred /etc/os-release and fall back on /usr/lib/os-release
    # if it does not exist under /etc
    etc_path = os.path.join(rootfs.get_working_dir(), constants.mergedir,
                            constants.etc_release_path)
    lib_path = os.path.join(rootfs.get_working_dir(), constants.mergedir,
                            constants.lib_release_path)
    if not os.path.exists(etc_path):
        if not os.path.exists(lib_path):
            return ''
        etc_path = lib_path
    # file exists at this point, try to read it
    with open(etc_path, 'r') as f:
        lines = f.readlines()
    pretty_name = ''
    # iterate through os-release file to find OS
    for l in lines:
        line = l.rstrip()
        # skip blank lines, comments and malformed lines: os-release
        # files may contain them and splitting those on '=' would raise
        # ValueError on tuple unpacking
        if not line or line.startswith('#') or '=' not in line:
            continue
        key, val = line.split('=', 1)
        if key == "PRETTY_NAME":
            pretty_name = val
            break
    return pretty_name.strip('"')
def get_untar_dir(self):
    """Return the directory where this layer's contents are untarred.

    The location depends on the image layout: docker layouts keep the
    contents next to the layer tarball, while OCI layouts use a
    directory named after the layer index (the tar file may sit at the
    root of the directory, so we go one level deeper).
    """
    working_dir = rootfs.get_working_dir()
    if self.image_layout == 'docker':
        # docker layout: untar alongside the layer tar file
        tar_parent = os.path.dirname(self.tar_file)
        return os.path.join(working_dir, tar_parent, constants.untar_dir)
    # OCI layout: use the layer index as the subdirectory
    return os.path.join(working_dir, self.layer_index, constants.untar_dir)
def testGetUntarDir(self):
    """Exercise the image_layout/layer_index setters and get_untar_dir()."""
    self.layer.image_layout = "oci"
    self.assertEqual(self.layer.image_layout, "oci")
    self.layer.image_layout = "docker"
    self.assertEqual(self.layer.image_layout, "docker")
    # NOTE(review): setting an empty layout appears to default back to
    # "oci" in the property setter — confirm against the layer class
    self.layer.image_layout = ""
    self.assertEqual(self.layer.image_layout, "oci")
    # the layer_index setter seems to stringify its value — TODO confirm
    self.layer.layer_index = 1
    self.assertEqual(self.layer.layer_index, "1")
    # OCI layout: path is built from the stringified layer index
    expected_path = os.path.join(rootfs.get_working_dir(), '1/contents')
    self.assertEqual(self.layer.get_untar_dir(), expected_path)
    # docker layout: path is built from the layer tar file's directory
    self.layer.image_layout = "docker"
    expected_path = os.path.join(rootfs.get_working_dir(), 'path/to/contents')
    self.assertEqual(self.layer.get_untar_dir(), expected_path)
def extract_image(args):
    """Extract an image into the working directory.

    The image can either be dumped from the local Docker daemon or
    provided as an image tarball (assumed to be the product of
    "docker save", hence in the docker style layout). Return a string
    identifying the image — repo digest preferred, then repo tag, then
    image Id — or the raw tarball path. Return None if nothing could
    be extracted.
    """
    if args.docker_image:
        # extract the docker image
        image_attrs = docker_api.dump_docker_image(args.docker_image)
        if image_attrs:
            # repo name and digest is preferred, but if that doesn't exist
            # the repo name and tag will do. If neither exist use repo Id.
            # Early returns avoid the original's unbound-variable NameError
            # when all three attributes are empty.
            if image_attrs['RepoDigests']:
                return image_attrs['RepoDigests'][0]
            if image_attrs['RepoTags']:
                return image_attrs['RepoTags'][0]
            if image_attrs['Id']:
                return image_attrs['Id']
        logger.critical("Cannot extract Docker image")
    if args.raw_image:
        # for now we assume that the raw image tarball is always
        # the product of "docker save", hence it will be in
        # the docker style layout
        if rootfs.extract_tarfile(args.raw_image, rootfs.get_working_dir()):
            return args.raw_image
        logger.critical("Cannot extract raw image")
    return None
def get_image_manifest(self):
    '''Load and return the image's manifest.json as a dict.

    Assumes a temp folder containing the image's manifest.json exists.'''
    # the manifest lives at the root of the working directory
    with general.pushd(rootfs.get_working_dir()):
        with open(manifest_file) as mf:
            manifest = json.load(mf)
    return manifest
def teardown(keep=False):
    """Tear down the environment setup"""
    logger.debug("Tearing down...")
    # persist the cache before touching the filesystem
    cache.save()
    if keep:
        # user asked to keep the working directory around
        logger.debug(
            "Working directory available at: %s", rootfs.get_working_dir())
    else:
        clean_working_dir()
def gen_fs_hash(self):
    '''Extract the layer tarball and record its filesystem hash.

    Only applies when the image class was created with a tar_file:
    extract the tarball into the untar directory and store the
    resulting filesystem hash.'''
    if self.tar_file:
        fs_dir = self.get_untar_dir()
        # make directory structure if it doesn't exist; exist_ok avoids
        # the race between the original exists-check and the creation
        os.makedirs(fs_dir, exist_ok=True)
        tar_file = os.path.join(rootfs.get_working_dir(), self.tar_file)
        rootfs.extract_tarfile(tar_file, fs_dir)
        self.__fs_hash = rootfs.calc_fs_hash(fs_dir)
def get_image_config(self, manifest):
    '''Return the image config as a dict.

    Assumes there now exists a working directory where the image
    metadata exists; the config file path is in the same root path as
    the manifest file.'''
    config_file = self.get_image_config_file(manifest)
    with general.pushd(rootfs.get_working_dir()):
        with open(config_file) as cf:
            config = json.load(cf)
    return config
def get_base_bin():
    '''Return the binary used to identify the base OS layer.

    Given the base layer object, check each known command-lib base
    binary's paths under the mounted layer filesystem and return the
    first key whose path exists, or an empty string if none does.
    Assume that the layer filesystem is mounted.'''
    # the path to where the filesystem is mounted
    # look at utils/rootfs.py mount_base_layer module
    cwd = os.path.join(rootfs.get_working_dir(), constants.mergedir)
    for key, value in command_lib.command_lib['base'].items():
        for path in value['path']:
            if os.path.exists(os.path.join(cwd, path)):
                # return immediately: the original only broke the inner
                # loop, letting a later entry overwrite an earlier match
                return key
    return ''
def recover():
    """Undo all the mounts and clean up directories"""
    # best-effort unmounting: failures from either command are ignored
    for undo_op in (rootfs.undo_mount, rootfs.unmount_rootfs):
        try:
            undo_op()
        except subprocess.CalledProcessError:
            pass
    # we nuke all the directories after mounting
    rootfs.clean_up()
    working_dir = rootfs.get_working_dir()
    if os.path.exists(working_dir):
        rootfs.root_command(rootfs.remove, working_dir)
def clean_working_dir(bind_mount):
    '''Clean up the working directory
    If bind_mount is true then leave the upper level directory'''
    path = rootfs.get_working_dir()
    if not os.path.exists(path):
        return
    if not bind_mount:
        # remove the whole tree, top directory included
        shutil.rmtree(path)
        return
    # clean whatever is in temp_folder without removing the folder
    for inode in os.listdir(path):
        inode_path = os.path.join(path, inode)
        if os.path.isdir(inode_path):
            shutil.rmtree(inode_path)
        else:
            os.remove(inode_path)
def extract_image(args):
    """Extract the image into the working directory.

    The image can either be downloaded from a container registry or
    provided as an image tarball. Return a (layout, image) pair where
    layout is 'oci' or 'docker'; return (None, None) on failure."""
    if args.image:
        # download the image
        if skopeo.pull_image(args.image, args.no_tls):
            return 'oci', args.image
        logger.critical("Cannot download Container image: \"%s\"", args.image)
    if args.raw_image:
        # for now we assume that the raw image tarball is always
        # the product of "docker save", hence it will be in
        # the docker style layout
        if rootfs.extract_tarfile(args.raw_image, rootfs.get_working_dir()):
            return 'docker', args.raw_image
        logger.critical("Cannot extract raw Docker image")
    return None, None
def analyze_subsequent_layers(image_obj, prereqs, master_list, options): """Assuming we have completed analyzing the first layer of the given image object, we now analyze the remaining layers. While we have layers: 1. Check if the layer is empty. If it is, then we can't do anything and we should continue 2. See if we can load the layer from cache. If we can't then do a fresh analysis package information and bundle it into the image object 3. Update the master list""" # if the driver is 'default' then the first layer needs to be extracted mergepath = os.path.join(rootfs.get_working_dir(), constants.mergedir) if not os.listdir(mergepath): prep_layers(image_obj, 0, 'default') curr_layer = 1 # get list of environment variables prereqs.envs = lock.get_env_vars(image_obj) while curr_layer < len(image_obj.layers): # If work_dir changes, update value accordingly # so we can later execute base.yml commands from the work_dir if image_obj.layers[curr_layer].get_layer_workdir(): prereqs.layer_workdir = \ image_obj.layers[curr_layer].get_layer_workdir() # make a notice for each layer origin_next_layer = 'Layer {}'.format( image_obj.layers[curr_layer].layer_index) # check if this is an empty layer if common.is_empty_layer(image_obj.layers[curr_layer]): # we continue to the next layer logger.warning(errors.empty_layer) image_obj.layers[curr_layer].origins.add_notice_to_origins( origin_next_layer, Notice(errors.empty_layer, 'warning')) curr_layer = curr_layer + 1 continue if not common.load_from_cache(image_obj.layers[curr_layer], options.redo): fresh_analysis(image_obj, curr_layer, prereqs, options) # If the driver is 'default' apply the current layer anyway if options.driver == 'default': apply_layers(image_obj, curr_layer) # update the master list dcom.update_master_list(master_list, image_obj.layers[curr_layer]) curr_layer = curr_layer + 1
def execute_docker_image(args):  # pylint: disable=too-many-branches
    '''Execution path if given a Docker image'''
    logger.debug('Starting analysis...')
    image_string = ''
    image_digest = ''
    # initialized so the cleanup step can tell whether an image was
    # ever loaded; the original referenced an unbound name (NameError)
    # whenever extraction failed and keep_wd was not set
    full_image = None
    if args.docker_image:
        # extract the docker image
        image_attrs = docker_api.dump_docker_image(args.docker_image)
        if image_attrs:
            if image_attrs['RepoTags']:
                image_string = image_attrs['RepoTags'][0]
            if image_attrs['RepoDigests']:
                image_digest = image_attrs['RepoDigests'][0]
        else:
            logger.critical("Cannot extract Docker image")
    elif args.raw_image:
        # for now we assume that the raw image tarball is always
        # the product of "docker save", hence it will be in
        # the docker style layout
        if rootfs.extract_tarfile(args.raw_image, rootfs.get_working_dir()):
            image_string = args.raw_image
        else:
            logger.critical("Cannot extract raw image")
    # If the image has been extracted, load the metadata
    if image_string:
        full_image = report.load_full_image(image_string, image_digest)
        # check if the image was loaded successfully
        if full_image.origins.is_empty():
            # Add an image origin here
            full_image.origins.add_notice_origin(
                formats.docker_image.format(imagetag=image_string))
            # analyze image
            analyze(full_image, args)
            # report out
            report.report_out(args, full_image)
        else:
            # we cannot load the full image
            logger.error('Cannot retrieve full image metadata')
    # cleanup, but only if an image was actually loaded
    if not args.keep_wd and full_image is not None:
        prep.clean_image_tars(full_image)
def extract_image_metadata(image_tag_string):
    '''Run docker save and extract the files in a temporary directory'''
    temp_path = rootfs.get_working_dir()
    placeholder = os.path.join(general.get_top_dir(), temp_tarfile)
    try:
        if general.check_tar(image_tag_string) is True:
            # image_tag_string is the path to the tar file for raw images
            rootfs.extract_tarfile(image_tag_string, temp_path)
        else:
            docker_img = client.images.get(image_tag_string)
            byte_stream = docker_img.save(chunk_size=2097152, named=True)
            # spool the whole tar byte stream into a temporary tar file
            with open(placeholder, 'wb') as tar_fd:
                for chunk in byte_stream:
                    tar_fd.write(chunk)
            # extract tarfile into folder, then drop the placeholder
            rootfs.extract_tarfile(placeholder, temp_path)
            os.remove(placeholder)
        # an empty directory means nothing was actually extracted
        if not os.listdir(temp_path):
            raise IOError('Unable to untar Docker image')
    except docker.errors.APIError:  # pylint: disable=try-except-raise
        raise
def create_script(command, prereqs, method):
    """Create the script to execute in an unshared environment.

    method selects the template: 'container' wraps the command in a
    chroot of the mounted filesystem (mounting /proc first), while
    'host' runs it directly with the host shell. Returns the path of
    the written, owner-executable script."""
    chroot_script = """#!{host_shell}
mount -t proc /proc {mnt}/proc
chroot {mnt} {fs_shell} -c "{snip}"
"""
    host_script = """#!{host_shell}
{host_shell} -c "{snip}"
"""
    script_path = os.path.join(rootfs.get_working_dir(), constants.script_file)
    script = ''
    if method == 'container':
        script = chroot_script.format(host_shell=prereqs.host_shell,
                                      mnt=prereqs.host_path,
                                      fs_shell=prereqs.fs_shell,
                                      snip=command)
    if method == 'host':
        script = host_script.format(host_shell=prereqs.host_shell,
                                    snip=command)
    with open(script_path, 'w', encoding='utf-8') as script_fd:
        script_fd.write(script)
    # owner-only read/write/execute
    os.chmod(script_path, 0o700)
    return script_path
def apply_layers(image_obj, top_layer): """Apply image diff layers without using a kernel snapshot driver""" # All merging happens in the merge directory target = os.path.join(rootfs.get_working_dir(), constants.mergedir) layer_dir = image_obj.layers[top_layer].get_untar_dir() layer_contents = layer_dir + '/*' # Account for whiteout files for fd in image_obj.layers[top_layer].files: if fd.is_whiteout: # delete the corresponding file or directory in the target # directory as well as the layer contents directory deleted = fd.name.replace('.wh.', '') delpath = os.path.join(target, os.path.dirname(fd.path), deleted) if os.path.exists(delpath): if os.path.isfile(delpath): rootfs.root_command(['rm'], delpath) else: rootfs.root_command(rootfs.remove, delpath) os.remove(os.path.join(layer_dir, fd.path)) # Finally, bulk copy the layer contents into the target directory # if there are any files to move if os.listdir(layer_dir): rootfs.root_command(['cp', '-r'] + glob.glob(layer_contents), target) return target
def extract_image(image_obj):
    """Run docker save and extract the resulting tarball into the working
    directory. Return True on success, False otherwise."""
    temp_path = rootfs.get_working_dir()
    placeholder = os.path.join(general.get_top_dir(), constants.temp_tarfile)
    # try to save the image
    try:
        byte_stream = image_obj.save(chunk_size=2097152, named=True)
        # spool the whole tar byte stream into a temporary tar file
        with open(placeholder, 'wb') as tar_fd:
            for chunk in byte_stream:
                tar_fd.write(chunk)
        # unpack the temporary tar file into the working directory,
        # then discard it
        rootfs.extract_tarfile(placeholder, temp_path)
        os.remove(placeholder)
        # an empty working directory means the operations didn't work
        if not os.listdir(temp_path):
            logger.critical('Unable to extract Docker image')
            return False
        return True
    except docker.errors.APIError as e:
        logger.critical('Something happened with the Docker client: %s', e)
        return False
def setup():
    """For the setup, we will just need to create the working directory"""
    working_dir = rootfs.get_working_dir()
    if os.path.isdir(working_dir):
        # nothing to do, directory already present
        return
    os.mkdir(working_dir)
def clean_working_dir():
    """Remove the working directory tree if it exists."""
    working_dir = rootfs.get_working_dir()
    if os.path.exists(working_dir):
        shutil.rmtree(working_dir)
def get_mount_path():
    """Return the directory the layer filesystem is mounted at."""
    working_dir = rootfs.get_working_dir()
    return os.path.join(working_dir, constants.mergedir)
def cleanup():
    """Clean up the working directory"""
    # release rootfs resources first, then remove the working
    # directory itself with elevated privileges
    rootfs.clean_up()
    working_dir = rootfs.get_working_dir()
    rootfs.root_command(rootfs.remove, working_dir)
def clean_working_dir():
    '''Remove the entire working directory tree if it exists.

    (The previous docstring mentioned a bind_mount flag; this variant
    takes no arguments and always removes the whole tree.)'''
    path = rootfs.get_working_dir()
    if os.path.exists(path):
        shutil.rmtree(path)