def get_commands_from_history(image_layer):
    '''Given the image layer object, get the list of command objects that
    created the layer.

    Returns an empty list when the layer has no 'created_by' history or when
    the instruction is an ADD/COPY (no package information can be derived).'''
    # set up notice origin for the layer
    origin_layer = 'Layer: ' + image_layer.fs_hash[:10]
    if image_layer.created_by:
        instruction = created_to_instruction(image_layer.created_by)
        image_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.dockerfile_line.format(
                dockerfile_instruction=instruction), 'info'))
    else:
        image_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.no_created_by, 'warning'))
        # bug fix: without created_by there is no instruction to parse;
        # previously execution fell through and raised NameError on
        # 'instruction' below
        return []
    command_line = instruction.split(' ', 1)[1]
    # Image layers are created with the directives RUN, ADD and COPY
    # For ADD and COPY instructions, there is no information about the
    # packages added
    if 'ADD' in instruction or 'COPY' in instruction:
        image_layer.origins.add_notice_to_origins(origin_layer, Notice(
            errors.unknown_content.format(files=command_line), 'warning'))
        # return an empty list as we cannot find any commands
        return []
    # for RUN instructions we can return a list of commands
    command_list, msg = common.filter_install_commands(command_line)
    if msg:
        image_layer.origins.add_notice_to_origins(
            origin_layer, Notice(msg, 'warning'))
    return command_list
def add_snippet_packages(image_layer, command, pkg_listing, shell):
    '''Given an image layer object, a command object, the package listing
    and the shell used to invoke commands, add package metadata to the layer
    object. We assume the filesystem is already mounted and ready
        1. Get the packages installed by the command
        2. For each package get the dependencies
        3. For each unique package name, find the metadata and add to the
           layer'''
    # set up a notice origin for the layer
    origin_layer = 'Layer: ' + image_layer.fs_hash[:10]
    # record what is about to be invoked to find packages for this command
    cmd_msg = formats.invoke_for_snippets + '\n' + \
        content.print_package_invoke(command.name)
    image_layer.origins.add_notice_to_origins(origin_layer, Notice(
        cmd_msg, 'info'))
    pkg_list = get_installed_package_names(command)
    # collect all the dependencies for each package name
    all_pkgs = []
    for pkg_name in pkg_list:
        pkg_invoke = command_lib.check_for_unique_package(
            pkg_listing, pkg_name)
        deps, deps_msg = get_package_dependencies(
            pkg_invoke, pkg_name, shell)
        if deps_msg:
            logger.warning(deps_msg)
            image_layer.origins.add_notice_to_origins(
                origin_layer, Notice(deps_msg, 'error'))
        all_pkgs.append(pkg_name)
        all_pkgs.extend(deps)
    unique_pkgs = list(set(all_pkgs))
    # get package metadata for each unique package name
    for pkg_name in unique_pkgs:
        pkg = Package(pkg_name)
        # bug fix: look up the invoke listing for *this* package; the old
        # code reused whatever pkg_invoke was left over from the last
        # iteration of the loop above (and raised NameError when pkg_list
        # was empty)
        pkg_invoke = command_lib.check_for_unique_package(
            pkg_listing, pkg_name)
        fill_package_metadata(pkg, pkg_invoke, shell)
        image_layer.add_package(pkg)
def load_base_image():
    '''Create base image from dockerfile instructions and return the image'''
    base_image, dockerfile_lines = dhelper.get_dockerfile_base()
    # try to get image metadata
    if not container.check_image(base_image.repotag):
        # if no base image is found, give a warning and continue
        if not container.pull_image(base_image.repotag):
            logger.warning(
                errors.cannot_find_image.format(imagetag=base_image.repotag))
    try:
        base_image.load_image()
    except NameError as error:
        logger.warning('Error in loading base image: ' + str(error))
        base_image.origins.add_notice_to_origins(
            dockerfile_lines, Notice(str(error), 'error'))
    except subprocess.CalledProcessError as error:
        # the subprocess output is bytes; decode before logging
        logger.warning(
            'Error in loading base image: ' + str(error.output, 'utf-8'))
        base_image.origins.add_notice_to_origins(
            dockerfile_lines, Notice(str(error.output, 'utf-8'), 'error'))
    except IOError as error:
        logger.warning('Error in loading base image: ' + str(error))
        # bug fix: was add_notice_to_origin (no such method), which would
        # raise AttributeError and mask the original IOError
        base_image.origins.add_notice_to_origins(
            dockerfile_lines, Notice(str(error), 'error'))
    return base_image
def testAddNotice(self):
    '''Adding one fully-populated notice should store it on the package
    and preserve its origin.'''
    notice = Notice()
    notice.origin = 'FROM'
    notice.message = 'no image'
    notice.level = 'error'
    pkg = self.package
    pkg.add_notice(notice)
    self.assertEqual(len(pkg.notices), 1)
    self.assertEqual(pkg.notices[0].origin, 'FROM')
def add_base_packages(image_layer, binary, shell):
    '''Given the image layer, the binary to invoke and shell:
        1. get the listing from the base.yml
        2. Invoke any commands against the base layer
        3. Make a list of packages and add them to the layer'''
    origin_layer = 'Layer: ' + image_layer.fs_hash[:10]
    # record which dockerfile instruction created the layer, if known
    if image_layer.created_by:
        image_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.layer_created_by.format(
                created_by=image_layer.created_by), 'info'))
    else:
        image_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.no_created_by, 'warning'))
    origin_command_lib = formats.invoking_base_commands
    # find the binary
    listing = command_lib.get_base_listing(binary)
    if listing:
        # put info notice about what is going to be invoked
        snippet_msg = formats.invoke_for_base + '\n' + \
            content.print_base_invoke(binary)
        image_layer.origins.add_notice_to_origins(
            origin_layer, Notice(snippet_msg, 'info'))
        # NOTE(review): this reassignment discards the 'shell' argument
        # passed by the caller in favor of the listing's shell — confirm
        # that is intended
        shell, msg = command_lib.get_image_shell(listing)
        if not shell:
            shell = constants.shell
        # get all the packages in the base layer
        names, n_msg = command_lib.get_pkg_attr_list(shell, listing['names'])
        versions, v_msg = command_lib.get_pkg_attr_list(
            shell, listing['versions'])
        licenses, l_msg = command_lib.get_pkg_attr_list(
            shell, listing['licenses'])
        src_urls, u_msg = command_lib.get_pkg_attr_list(
            shell, listing['src_urls'])
        # add a notice to the image if something went wrong
        invoke_msg = n_msg + v_msg + l_msg + u_msg
        if invoke_msg:
            image_layer.origins.add_notice_to_origins(
                origin_layer, Notice(invoke_msg, 'error'))
        # attribute lists are only applied when their length matches the
        # names list; NOTE(review): a result with exactly one name is
        # skipped by the > 1 condition — confirm intended
        if names and len(names) > 1:
            for index in range(0, len(names)):
                pkg = Package(names[index])
                if len(versions) == len(names):
                    pkg.version = versions[index]
                if len(licenses) == len(names):
                    pkg.license = licenses[index]
                if len(src_urls) == len(names):
                    pkg.src_url = src_urls[index]
                image_layer.add_package(pkg)
    # if there is no listing add a notice
    else:
        image_layer.origins.add_notice_to_origins(
            origin_command_lib,
            Notice(errors.no_listing_for_base_key.format(listing_key=binary),
                   'error'))
def setUp(self):
    '''Build one Notice per severity level, the list holding them in
    severity order, and a NoticeOrigin fixture.'''
    self.notice_info = Notice("info")
    self.notice_warn = Notice("warning", "warning")
    self.notice_errr = Notice("error", "error")
    self.notice_hint = Notice("hint", "hint")
    self.notices = [self.notice_info, self.notice_warn,
                    self.notice_errr, self.notice_hint]
    self.notice_origin = NoticeOrigin('origin_str')
def load_full_image():
    '''Create image object from test image and return the object.

    Any load failure is recorded as an error notice under the image's
    repotag instead of propagating.'''
    image = DockerImage(docker.get_dockerfile_image_tag())
    try:
        image.load_image()
    except subprocess.CalledProcessError as error:
        # subprocess output is bytes; decode it for the notice text
        image.origins.add_notice_to_origins(
            image.repotag, Notice(str(error.output, 'utf-8'), 'error'))
    except (NameError, IOError) as error:
        # both handlers record the stringified exception identically
        image.origins.add_notice_to_origins(
            image.repotag, Notice(str(error), 'error'))
    return image
def load_from_cache(image):
    '''Given an image object, check against cache to see if a layer id exists
    if yes then get the package list and load it in the image layer. If it
    doesn't exist continue. If not all the layers have packages, return False
    else return True'''
    is_full = True
    # use the repotag for the origin prefix when available, else the
    # truncated image id
    if image.repotag:
        origin_str = image.repotag
    else:
        origin_str = 'Image ID - ' + image.id[:10]
    for layer in image.layers:
        if not layer.packages:
            # bug fix: build each layer's origin from the fixed image
            # prefix; previously origin_str itself was extended inside the
            # loop, so every later layer's origin accumulated all prior
            # layer ids
            layer_origin = origin_str + ': ' + layer.diff_id[:10]
            # there are no packages in this layer
            # try to get it from the cache
            raw_pkg_list = cache.get_packages(layer.diff_id)
            if not raw_pkg_list:
                is_full = False
            else:
                logger.debug('Loaded from cache: layer {}'.format(
                    layer.diff_id[:10]))
                message = formats.loading_from_cache.format(
                    layer_id=layer.diff_id[:10])
                # add notice to the origin
                layer.origins.add_notice_to_origins(
                    layer_origin, Notice(message, 'info'))
                for pkg_dict in raw_pkg_list:
                    pkg = Package(pkg_dict['name'])
                    pkg.fill(pkg_dict)
                    layer.add_package(pkg)
    return is_full
class TestClassNotice(unittest.TestCase):
    '''Unit tests for the Notice class: defaults, setters with level
    validation, getters, and dict serialization.'''

    def setUp(self):
        self.notice = Notice()

    def tearDown(self):
        del self.notice

    def testInstance(self):
        # a fresh notice has no message and defaults to 'info'
        self.assertFalse(self.notice.message)
        self.assertEqual(self.notice.level, 'info')

    def testSetters(self):
        self.notice.message = 'tag'
        self.assertEqual(self.notice.message, 'tag')
        # an unknown level must be rejected
        with self.assertRaises(NoticeException):
            self.notice.level = 'something'
        self.notice.level = 'warning'

    def testGetters(self):
        self.notice.message = 'tag'
        self.notice.level = 'warning'
        self.assertEqual(self.notice.message, 'tag')
        self.assertEqual(self.notice.level, 'warning')

    def testToDict(self):
        self.notice.message = 'tag'
        self.notice.level = 'warning'
        # bug fix: to_dict returns a dict, so use subscripting rather than
        # attribute access (which raised AttributeError); also avoid
        # shadowing the 'dict' builtin
        notice_dict = self.notice.to_dict()
        self.assertEqual(notice_dict['message'], 'tag')
        self.assertEqual(notice_dict['level'], 'warning')
def load_from_cache(layer):
    '''Given a layer object, check against cache to see if that layer id
    exists. If yes, load the cached package list into the layer and return
    True; otherwise return False. Notices are added under this layer's
    origin string.'''
    # nothing to do when packages are already present
    if layer.packages:
        return False
    # try to fetch the package list from the cache
    raw_pkg_list = cache.get_packages(layer.diff_id)
    if not raw_pkg_list:
        return False
    short_id = layer.diff_id[:10]
    logger.debug('Loaded from cache: layer {}'.format(short_id))
    # record that this layer's contents came from the cache
    layer.origins.add_notice_to_origins(
        'Layer: ' + short_id,
        Notice(formats.loading_from_cache.format(layer_id=short_id), 'info'))
    for pkg_dict in raw_pkg_list:
        pkg = Package(pkg_dict['name'])
        pkg.fill(pkg_dict)
        layer.add_package(pkg)
    return True
def get_dockerfile_packages():
    '''Given a Dockerfile return an approximate image object. This is mostly
    guess work and shouldn't be relied on for accurate information. Add
    Notice messages indicating as such:
        1. Create an image with a placeholder repotag
        2. For each RUN command, create a package list
        3. Create layer objects with incremental integers and add the
           package list to that layer with a Notice about parsing
        4. Return stub image'''
    stub_image = Image('easteregg:cookie')
    layer_count = 0
    for inst in docker.docker_commands:
        if inst[0] == 'RUN':
            layer_count = layer_count + 1
            layer = ImageLayer(layer_count)
            install_commands, msg = common.filter_install_commands(inst[1])
            if msg:
                layer.origins.add_notice_to_origins(
                    inst[1], Notice(msg, 'info'))
            pkg_names = []
            for command in install_commands:
                # bug fix: get_installed_package_names returns a list, so
                # extend rather than append (append nested the list and
                # Package() was then constructed with a list, not a name)
                pkg_names.extend(common.get_installed_package_names(command))
            for pkg_name in pkg_names:
                pkg = Package(pkg_name)
                # shell parser does not parse version pins yet
                # when that is enabled, Notices for no versions need to be
                # added here
                layer.add_package(pkg)
            # NOTE(review): 'layer' is populated but never attached to
            # stub_image — confirm whether the stub image is meant to
            # collect these layers
    return stub_image
def load_full_image(image_tag_string):
    '''Create image object from image name and tag and return the object.

    Load failures are recorded as error notices under a failure origin
    rather than raised.'''
    image = DockerImage(image_tag_string)
    failure_origin = formats.image_load_failure.format(
        testimage=image.repotag)
    try:
        image.load_image()
    except subprocess.CalledProcessError as error:
        # subprocess output is bytes; decode it for the notice text
        image.origins.add_notice_to_origins(
            failure_origin, Notice(str(error.output, 'utf-8'), 'error'))
    except (NameError, IOError) as error:
        # both exception types are recorded identically
        image.origins.add_notice_to_origins(
            failure_origin, Notice(str(error), 'error'))
    return image
def image_setup(image_obj):
    '''Add notices for each layer: register a notice origin per layer and,
    for imported layers, note the Dockerfile import instruction.'''
    for layer in image_obj.layers:
        origin = 'Layer: ' + layer.fs_hash[:10]
        layer.origins.add_notice_origin(origin)
        if layer.import_str:
            note = 'Imported in Dockerfile using: ' + layer.import_str
            layer.origins.add_notice_to_origins(origin, Notice(note, 'info'))
def add_diff_packages(diff_layer, command_line, shell):
    '''Given a layer object, command line string that created it, and the
    shell used to invoke commands, add package metadata to the layer object
        1. Parse the command line to get individual install commands
        2. For each command get the packages installed
        3. For each package get the dependencies
        4. For each unique package name, find the metadata and add to the
           layer'''
    origin_layer = 'Layer: ' + diff_layer.diff_id[:10]
    # parse all installed commands
    cmd_list, msg = filter_install_commands(command_line)
    if msg:
        diff_layer.origins.add_notice_to_origins(
            origin_layer, Notice(msg, 'warning'))
    # find packages for each command
    for command in cmd_list:
        # record what will be invoked to find packages for this command
        cmd_msg = formats.invoke_for_snippets + '\n' + \
            content.print_package_invoke(command.name)
        diff_layer.origins.add_notice_to_origins(origin_layer, Notice(
            cmd_msg, 'info'))
        pkg_list = get_installed_package_names(command)
        # collect all the dependencies for each package name
        all_pkgs = []
        for pkg_name in pkg_list:
            pkg_listing = command_lib.get_package_listing(
                command.name, pkg_name)
            deps, deps_msg = get_package_dependencies(
                pkg_listing, pkg_name, shell)
            if deps_msg:
                # dependency lookup failed: log and attach an error notice
                logger.warning(deps_msg)
                diff_layer.origins.add_notice_to_origins(
                    origin_layer, Notice(deps_msg, 'error'))
            all_pkgs.append(pkg_name)
            all_pkgs.extend(deps)
        # deduplicate packages and their dependencies
        unique_pkgs = list(set(all_pkgs))
        # get package metadata for each package name; the listing is looked
        # up again per package so each one uses its own listing
        for pkg_name in unique_pkgs:
            pkg = Package(pkg_name)
            pkg_listing = command_lib.get_package_listing(
                command.name, pkg_name)
            fill_package_metadata(pkg, pkg_listing, shell)
            diff_layer.add_package(pkg)
def get_dockerfile_base():
    '''Get the base image object from the dockerfile base instructions
        1. get the instructions around FROM
        2. get the base image and tag
        3. Make notes based on what the image and tag rules are
        4. Return an image object and the base instructions string

    NOTE(review): the success path returns a (base_image, dockerfile_lines)
    tuple, but the scratch and ValueError paths return a bare None — a
    caller that unpacks the result will raise TypeError on those paths;
    confirm intended.'''
    try:
        base_instructions = dockerfile.get_base_instructions(docker_commands)
        base_image_tag = dockerfile.get_base_image_tag(base_instructions)
        dockerfile_lines = print_dockerfile_base(base_instructions)
        # check for scratch
        if base_image_tag[0] == 'scratch':
            # there is no base image - return no image object
            return None
        # there should be some image object here
        repotag = base_image_tag[0] + dockerfile.tag_separator + \
            base_image_tag[1]
        from_line = 'FROM ' + repotag
        base_image = DockerImage(repotag)
        base_image.origins.add_notice_origin(dockerfile_lines)
        base_image.name = base_image_tag[0]
        # check if there is a tag
        if not base_image_tag[1]:
            # no tag given: warn and fall back to 'latest'
            message_string = errors.dockerfile_no_tag.format(
                dockerfile_line=from_line)
            base_image.origins.add_notice_to_origins(
                dockerfile_lines, Notice(message_string, 'warning'))
            base_image.tag = 'latest'
        else:
            base_image.tag = base_image_tag[1]
        # check if the tag is 'latest'
        if base_image_tag[1] == 'latest':
            # warn about using a mutable tag
            message_string = errors.dockerfile_using_latest.format(
                dockerfile_line=from_line)
            base_image.origins.add_notice_to_origins(
                dockerfile_lines, Notice(message_string, 'warning'))
        return base_image, dockerfile_lines
    except ValueError as e:
        logger.warning(
            errors.cannot_parse_base_image.format(
                dockerfile=dockerfile_global, error_msg=e))
        return None
def add_packages_from_history(diff_layer, shell):
    '''Given a layer object, get package objects installed in each layer
    At this time, Docker keeps a history of commands that created non-empty
    layers. Use that to find possible install commands and packages. This
    will not work for OCI compatible images as created_by is not mandated.'''
    origin_layer = 'Layer: ' + diff_layer.diff_id[:10]
    if diff_layer.created_by:
        instruction = created_to_instruction(diff_layer.created_by)
        diff_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.dockerfile_line.format(
                dockerfile_instruction=instruction), 'info'))
    else:
        diff_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.no_created_by, 'warning'))
        # bug fix: without created_by there is no instruction to inspect;
        # previously execution fell through to the 'RUN' check below and
        # raised NameError on the unbound 'instruction'
        return
    if 'RUN' in instruction:
        # for Docker the created_by comes from the instruction in the
        # dockerfile
        run_command_line = instruction.split(' ', 1)[1]
        common.add_diff_packages(diff_layer, run_command_line, shell)
def send_email_notice():
    '''Send an email notification, attaching the error log when one
    exists.'''
    notice = Notice('Task done successfully.')
    # switch to a warning message when an error log was written
    logfile = 'error.log' if os.path.exists('error.log') else None
    if logfile is not None:
        notice.content_type = 'ETL Warning!'
        notice.content = 'Error(s) occurred in ETL. Please see the log file for details.'
    msg = notice.email_msg(logfile=logfile)
    notice.send_email(msg)
def add_packages_from_history(image_obj, shell):
    '''Given a DockerImage object, get package objects installed in each layer
    Assume that the imported images have already gone through this process
    and have their layer's packages populated. So collecting package object
    occurs from the last linked layer:
        1. For each layer get a list of package names
        2. For each package name get a list of dependencies
        3. Create a list of package objects with metadata
        4. Add this to the layer'''
    # only process layers after the last imported layer
    image_layers = image_obj.layers[image_obj.get_last_import_layer() + 1:]
    logger.debug('Retrieving metadata for remaining {} layers'.format(
        len(image_layers)))
    for layer in image_layers:
        # NOTE(review): created_by may be absent for OCI images — confirm
        # created_to_instruction tolerates an empty value
        instruction = created_to_instruction(layer.created_by)
        origin_layer = instruction + ' -> ' + layer.diff_id[:10]
        layer.origins.add_notice_origin(origin_layer)
        origin_info = formats.invoke_for_snippets
        layer.origins.add_notice_origin(origin_info)
        if 'RUN' in instruction:
            # for Docker the created_by comes from the instruction in the
            # dockerfile
            run_command_line = instruction.split(' ', 1)[1]
            cmd_list, msg = common.filter_install_commands(run_command_line)
            if msg:
                layer.origins.add_notice_to_origins(
                    origin_info, Notice(msg, 'warning'))
            for command in cmd_list:
                # register an origin for this command's invocation
                origin_cmd = content.print_package_invoke(command.name)
                layer.origins.add_notice_origin(origin_cmd)
                pkg_list = common.get_installed_package_names(command)
                # collect each installed package plus its dependencies
                all_pkgs = []
                for pkg_name in pkg_list:
                    pkg_listing = command_lib.get_package_listing(
                        command.name, pkg_name)
                    deps, deps_msg = common.get_package_dependencies(
                        pkg_listing, pkg_name, shell)
                    if deps_msg:
                        # dependency failures are only logged here, no
                        # notice is attached
                        logger.warning(deps_msg)
                    all_pkgs.append(pkg_name)
                    all_pkgs.extend(deps)
                unique_pkgs = list(set(all_pkgs))
                # fill metadata for each unique package and add to layer
                for pkg_name in unique_pkgs:
                    pkg = Package(pkg_name)
                    pkg_listing = command_lib.get_package_listing(
                        command.name, pkg_name)
                    common.fill_package_metadata(pkg, pkg_listing, shell)
                    layer.add_package(pkg)
def _fill_pkg_attr(pkg_obj, pkg_listing, shell, key, origin_str):
    '''Look up one attribute listing ('version', 'license' or 'src_url') in
    the package listing, invoke it, and set the same-named attribute on
    pkg_obj. Attach an error notice when invocation fails and a warning
    notice when the listing key is missing.'''
    attr_listing, listing_msg = command_lib.check_library_key(
        pkg_listing, key)
    if attr_listing:
        attr_list, invoke_msg = command_lib.get_pkg_attr_list(
            shell, attr_listing, package_name=pkg_obj.name)
        if attr_list:
            # first invocation result wins, matching the original behavior
            setattr(pkg_obj, key, attr_list[0])
        else:
            pkg_obj.origins.add_notice_to_origins(
                origin_str, Notice(invoke_msg, 'error'))
    else:
        pkg_obj.origins.add_notice_to_origins(
            origin_str, Notice(listing_msg, 'warning'))


def fill_package_metadata(pkg_obj, pkg_listing, shell):
    '''Given a Package object and the Package listing from the command
    library, fill in the attribute value returned from looking up the data
    and methods of the package listing. Fill out: version, license and
    src_url. If there are errors, fill out notices'''
    # create a NoticeOrigin for the package
    origin_str = 'command_lib/snippets.yml'
    # the three attributes followed identical copy-paste stanzas; they are
    # now filled by one shared helper
    _fill_pkg_attr(pkg_obj, pkg_listing, shell, 'version', origin_str)
    _fill_pkg_attr(pkg_obj, pkg_listing, shell, 'license', origin_str)
    _fill_pkg_attr(pkg_obj, pkg_listing, shell, 'src_url', origin_str)
def execute_dockerfile(args):
    '''Execution path if given a dockerfile: try to build and analyze the
    full image; on any failure fall back to analyzing the base image plus a
    stub image parsed from the Dockerfile, then generate the report and
    tear down.'''
    check_docker_daemon()
    logger.debug('Setting up...')
    setup(dockerfile=args.dockerfile)
    # attempt to build the image
    logger.debug('Building Docker image...')
    # placeholder to check if we can analyze the full image
    completed = True
    build, msg = docker.is_build()
    if build:
        # attempt to get built image metadata
        image_tag_string = docker.get_dockerfile_image_tag()
        full_image = load_full_image(image_tag_string)
        if full_image.origins.is_empty():
            # image loading was successful
            # Add an image origin here
            full_image.origins.add_notice_origin(
                formats.dockerfile_image.format(dockerfile=args.dockerfile))
            # analyze image
            analyze_docker_image(full_image, True)
        else:
            # we cannot load the full image
            logger.warning('Cannot retrieve full image metadata')
            completed = False
        # clean up image
        container.remove_image(full_image.repotag)
        if not args.keep_working_dir:
            clean_image_tars(full_image)
    else:
        # cannot build the image
        logger.warning('Cannot build image')
        completed = False
    # check if we have analyzed the full image or not
    if not completed:
        # get the base image
        logger.debug('Loading base image...')
        base_image = load_base_image()
        if base_image.origins.is_empty():
            # image loading was successful
            # add a notice stating failure to build image
            base_image.origins.add_notice_to_origins(
                args.dockerfile, Notice(
                    formats.image_build_failure, 'warning'))
            # analyze image
            analyze_docker_image(base_image)
        else:
            # we cannot load the base image
            logger.warning('Cannot retrieve base image metadata')
        # run through commands in the Dockerfile
        logger.debug('Parsing Dockerfile to generate report...')
        stub_image = get_dockerfile_packages()
        # clean up image
        container.remove_image(base_image.repotag)
        if not args.keep_working_dir:
            clean_image_tars(base_image)
    # generate report based on what images were created: the full image on
    # success, otherwise the base image plus the Dockerfile stub
    if completed:
        generate_report(args, full_image)
    else:
        generate_report(args, base_image, stub_image)
    logger.debug('Teardown...')
    teardown()
    if not args.keep_working_dir:
        shutil.rmtree(os.path.abspath(constants.temp_folder))
def analyze_docker_image(image_obj, dockerfile=False):
    '''Given a DockerImage object, for each layer, retrieve the packages,
    first looking up in cache and if not there then looking up in the
    command library. For looking up in command library first mount the
    filesystem and then look up the command library for commands to run in
    chroot'''
    # find the layers that are imported
    if dockerfile:
        docker.set_imported_layers(image_obj)
    # add notices for each layer if it is imported
    image_setup(image_obj)
    shell = ''
    # set up empty master list of package names
    master_list = []
    # find the binary by mounting the base layer
    target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
    binary = common.get_base_bin(image_obj.layers[0])
    # set up a notice origin referring to the base command library listing
    origin_command_lib = formats.invoking_base_commands
    # find the shell to invoke commands in
    shell, msg = command_lib.get_image_shell(
        command_lib.get_base_listing(binary))
    if not shell:
        # add a warning notice for no shell in the command library
        logger.warning('No shell listing in command library. '
                       'Using default shell')
        # NOTE(review): 'binary' is passed positionally here while other
        # call sites use keyword fields — confirm the format string of
        # errors.no_shell_listing accepts a positional argument
        no_shell_message = errors.no_shell_listing.format(
            binary, default_shell=constants.shell)
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_command_lib, Notice(no_shell_message, 'warning'))
        # add a hint notice to add the shell to the command library
        add_shell_message = errors.no_listing_for_base_key.format(
            listing_key='shell')
        # bug fix: was add_notice_origins (no such method), which raised
        # AttributeError whenever this no-shell path executed
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_command_lib, Notice(add_shell_message, 'hint'))
        shell = constants.shell
    # only extract packages if there is a known binary and the layer is not
    # cached
    if binary:
        if not common.load_from_cache(image_obj.layers[0]):
            # get the packages of the first layer
            rootfs.prep_rootfs(target)
            common.add_base_packages(image_obj.layers[0], binary, shell)
            # unmount proc, sys and dev
            rootfs.undo_mount()
    else:
        logger.warning(errors.unrecognized_base.format(
            image_name=image_obj.name, image_tag=image_obj.tag))
    # unmount the first layer
    rootfs.unmount_rootfs()
    # populate the master list with all packages found in the first layer
    # can't use assignment as that will just point to the image object's
    # layer
    for p in image_obj.layers[0].get_package_names():
        master_list.append(p)
    # get packages for subsequent layers
    curr_layer = 1
    while curr_layer < len(image_obj.layers):
        if not common.load_from_cache(image_obj.layers[curr_layer]):
            # get commands that created the layer
            # for docker images this is retrieved from the image history
            command_list = docker.get_commands_from_history(
                image_obj.layers[curr_layer])
            if command_list:
                # mount diff layers from 0 till the current layer
                mount_overlay_fs(image_obj, curr_layer)
            # for each command look up the snippet library
            for command in command_list:
                pkg_listing = command_lib.get_package_listing(command.name)
                if type(pkg_listing) is str:
                    common.add_base_packages(
                        image_obj.layers[curr_layer], pkg_listing, shell)
                else:
                    common.add_snippet_packages(
                        image_obj.layers[curr_layer], command, pkg_listing,
                        shell)
            # only unmount when this iteration actually mounted the overlay
            if command_list:
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1
    common.save_to_cache(image_obj)
def testAddNotice(self):
    '''A default notice added to the layer should appear in its notice
    list.'''
    notice = Notice()
    self.layer.add_notice(notice)
    self.assertEqual(len(self.layer.notices), 1)
def setUp(self):
    '''Create a fresh default Notice fixture for each test.'''
    self.notice = Notice()
def analyze_docker_image(image_obj, dockerfile=False):
    '''Given a DockerImage object, for each layer, retrieve the packages,
    first looking up in cache and if not there then looking up in the
    command library. For looking up in command library first mount the
    filesystem and then look up the command library for commands to run in
    chroot'''
    # find the layers that are imported
    if dockerfile:
        docker.set_imported_layers(image_obj)
    # add notices for each layer if it is imported
    for layer in image_obj.layers:
        origin_str = 'Layer: ' + layer.diff_id[:10]
        layer.origins.add_notice_origin(origin_str)
        if layer.import_str:
            layer.origins.add_notice_to_origins(origin_str, Notice(
                'Imported in Dockerfile using: ' + layer.import_str, 'info'))
    shell = ''
    # set the layer that is mounted. In the beginning this is 0
    mounted = 0
    # set up empty master list of package names
    master_list = []
    # find the shell by mounting the base layer
    target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
    binary = common.get_base_bin(image_obj.layers[0])
    # find the shell to invoke commands in; fall back to the default when
    # the command library has no listing
    shell, _ = command_lib.get_image_shell(
        command_lib.get_base_listing(binary))
    if not shell:
        shell = constants.shell
    # only extract packages if there is a known binary and the layer is not
    # cached
    if binary:
        if not common.load_from_cache(image_obj.layers[0]):
            # get the packages of the first layer
            rootfs.prep_rootfs(target)
            common.add_base_packages(image_obj.layers[0], binary)
            # unmount proc, sys and dev
            rootfs.undo_mount()
    else:
        logger.warning(
            errors.unrecognized_base.format(image_name=image_obj.name,
                                            image_tag=image_obj.tag))
    # populate the master list with all packages found in the first layer
    # can't use assignment as that will just point to the image object's
    # layer
    for p in image_obj.layers[0].get_package_names():
        master_list.append(p)
    # get packages for subsequent layers
    curr_layer = 1
    while curr_layer < len(image_obj.layers):
        if not common.load_from_cache(image_obj.layers[curr_layer]):
            # mount from the layer after the mounted layer till the current
            # layer
            for index in range(mounted + 1, curr_layer + 1):
                target = rootfs.mount_diff_layer(
                    image_obj.layers[index].tar_file)
            # remember how far we have mounted so later iterations only
            # mount the new diff layers
            mounted = curr_layer
            # mount dev, sys and proc after mounting diff layers
            rootfs.prep_rootfs(target)
            docker.add_packages_from_history(image_obj.layers[curr_layer],
                                             shell)
            rootfs.undo_mount()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1
    # undo all the mounts
    rootfs.unmount_rootfs(mounted + 1)
    common.save_to_cache(image_obj)
def add_base_packages(base_layer, binary):
    '''Given the base layer and the binary found in layer fs:
        1. get the listing from the base.yml
        2. Invoke any commands against the base layer
        3. Make a list of packages and add them to the layer'''
    origin_layer = 'Layer: ' + base_layer.diff_id[:10]
    # record which dockerfile instruction created the layer, if known
    if base_layer.created_by:
        base_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.layer_created_by.format(
                created_by=base_layer.created_by), 'info'))
    else:
        base_layer.origins.add_notice_to_origins(origin_layer, Notice(
            formats.no_created_by, 'warning'))
    origin_command_lib = formats.invoking_base_commands
    # find the binary
    listing = command_lib.get_base_listing(binary)
    if listing:
        # put info notice about what is going to be invoked
        snippet_msg = formats.invoke_for_base + '\n' + \
            content.print_base_invoke(binary)
        base_layer.origins.add_notice_to_origins(
            origin_layer, Notice(snippet_msg, 'info'))
        shell, msg = command_lib.get_image_shell(listing)
        if not shell:
            # add a warning notice for no shell in the command library
            logger.warning('No shell listing in command library. '
                           'Using default shell')
            no_shell_message = errors.no_shell_listing.format(
                binary, default_shell=constants.shell)
            base_layer.origins.add_notice_to_origins(
                origin_command_lib, Notice(no_shell_message, 'warning'))
            # add a hint notice to add the shell to the command library
            add_shell_message = errors.no_listing_for_base_key.format(
                listing_key='shell')
            # bug fix: was add_notice_origins (no such method), which
            # raised AttributeError whenever this no-shell path executed
            base_layer.origins.add_notice_to_origins(
                origin_command_lib, Notice(add_shell_message, 'hint'))
            shell = constants.shell
        # get all the packages in the base layer
        names, n_msg = command_lib.get_pkg_attr_list(shell, listing['names'])
        versions, v_msg = command_lib.get_pkg_attr_list(
            shell, listing['versions'])
        licenses, l_msg = command_lib.get_pkg_attr_list(
            shell, listing['licenses'])
        src_urls, u_msg = command_lib.get_pkg_attr_list(
            shell, listing['src_urls'])
        # add a notice to the image if something went wrong
        invoke_msg = n_msg + v_msg + l_msg + u_msg
        if invoke_msg:
            base_layer.origins.add_notice_to_origins(
                origin_layer, Notice(invoke_msg, 'error'))
        # attribute lists are applied only when their length matches names
        if names and len(names) > 1:
            for index in range(0, len(names)):
                pkg = Package(names[index])
                if len(versions) == len(names):
                    pkg.version = versions[index]
                if len(licenses) == len(names):
                    pkg.license = licenses[index]
                if len(src_urls) == len(names):
                    pkg.src_url = src_urls[index]
                base_layer.add_package(pkg)
    # if there is no listing add a notice
    else:
        base_layer.origins.add_notice_to_origins(
            origin_command_lib, Notice(errors.no_listing_for_base_key.format(
                listing_key=binary), 'error'))
def add_base_packages(image):
    '''Given an image object, get a list of package objects from invoking
    the commands in the command library base section:
        1. For the image and tag name find if there is a list of package
           names
        2. If there is an invoke dictionary, invoke the commands
        3. Create a list of packages
        4. Add them to the image'''
    # information under the base image tag in the command library
    listing = command_lib.get_base_listing(image.name, image.tag)
    # create the origin for the base image
    origin_info = formats.invoking_base_commands + '\n' + \
        content.print_base_invoke(image.name, image.tag)
    image.origins.add_notice_origin(origin_info)
    origin_str = 'command_lib/base.yml'
    if listing:
        shell, msg = command_lib.get_image_shell(listing)
        if not shell:
            # add a warning notice for no shell in the command library
            logger.warning('No shell listing in command library. '
                           'Using default shell')
            no_shell_message = errors.no_shell_listing.format(
                image_name=image.name, image_tag=image.tag,
                default_shell=constants.shell)
            image.origins.add_notice_to_origins(
                origin_str, Notice(no_shell_message, 'warning'))
            # add a hint notice to add the shell to the command library
            add_shell_message = errors.no_listing_for_base_key.format(
                listing_key='shell')
            # bug fix: was add_notice_origins (no such method), which
            # raised AttributeError whenever this no-shell path executed
            image.origins.add_notice_to_origins(
                origin_str, Notice(add_shell_message, 'hint'))
            shell = constants.shell
        # check if a container is running first
        # eventually this needs to change to use derivatives that have
        # more than 1 layer
        # for now, we add the list of packages to all the layers in a
        # starting base image
        if check_container():
            names, n_msg = command_lib.get_pkg_attr_list(
                shell, listing['names'])
            versions, v_msg = command_lib.get_pkg_attr_list(
                shell, listing['versions'])
            licenses, l_msg = command_lib.get_pkg_attr_list(
                shell, listing['licenses'])
            src_urls, u_msg = command_lib.get_pkg_attr_list(
                shell, listing['src_urls'])
            # add a notice to the image if something went wrong
            invoke_msg = n_msg + v_msg + l_msg + u_msg
            if invoke_msg:
                image.origins.add_notice_to_origins(
                    origin_str, Notice(invoke_msg, 'error'))
            # attribute lists are applied only when their length matches
            # the names list
            if names and len(names) > 1:
                for index in range(0, len(names)):
                    pkg = Package(names[index])
                    if len(versions) == len(names):
                        pkg.version = versions[index]
                    if len(licenses) == len(names):
                        pkg.license = licenses[index]
                    if len(src_urls) == len(names):
                        pkg.src_url = src_urls[index]
                    for layer in image.layers:
                        layer.add_package(pkg)
        # if no container is running give a logging error
        else:
            logger.error(errors.no_running_docker_container)
    # if there is no listing add a notice
    else:
        image.origins.add_notice_to_origins(
            origin_str, Notice(
                errors.no_image_tag_listing.format(
                    image_name=image.name, image_tag=image.tag), 'error'))