def analyze_first_layer(image_obj, master_list, redo):
    """Analyze the first layer of the image and return the shell found.

    Look for a shell and a package-manager binary in the first layer,
    then collect the layer's packages (from cache when available) into
    master_list. Returns '' when the layer is empty, otherwise whatever
    shell lookup produced (which may itself be falsy).
    """
    first_layer = image_obj.layers[0]
    # notice origin used for every warning attached to this layer
    origin_first_layer = 'Layer {}'.format(first_layer.layer_index)
    # an empty layer has nothing to analyze
    if common.is_empty_layer(first_layer):
        logger.warning(errors.empty_layer)
        first_layer.origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.empty_layer, 'warning'))
        return ''
    # locate a shell inside the layer's filesystem
    shell = common.get_shell(first_layer)
    if not shell:
        logger.warning(errors.no_shell)
        first_layer.origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.no_shell, 'warning'))
    # locate the base package-manager binary
    binary = common.get_base_bin(first_layer)
    if not binary:
        logger.warning(errors.no_package_manager)
        first_layer.origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.no_package_manager, 'warning'))
    # run analysis only when the cache has no results for this layer
    if not common.load_from_cache(first_layer, redo):
        # record a guess at the OS
        common.get_os_style(first_layer, binary)
        # package extraction needs both a shell and a binary
        if shell and binary:
            execute_base_layer(first_layer, binary, shell)
    # fold every package found in the first layer into the master list
    master_list.extend(first_layer.packages)
    return shell
def analyze_subsequent_layers(image_obj, shell, master_list, redo, driver=None):
    """Analyze every layer after the first.

    Assuming the first layer has already been analyzed with the given
    shell, for each remaining layer:
    1. Skip empty layers (attaching a warning notice).
    2. Load the layer's packages from cache, or run a fresh analysis.
    3. Fold the layer's results into the master list.
    """
    # environment variables apply to every layer's analysis
    envs = lock.get_env_vars(image_obj)
    for curr_layer in range(1, len(image_obj.layers)):
        layer = image_obj.layers[curr_layer]
        # notice origin for warnings attached to this layer
        origin_next_layer = 'Layer {}'.format(layer.layer_index)
        # nothing to analyze in an empty layer
        if common.is_empty_layer(layer):
            logger.warning(errors.empty_layer)
            layer.origins.add_notice_to_origins(
                origin_next_layer, Notice(errors.empty_layer, 'warning'))
            continue
        # analyze only on a cache miss
        if not common.load_from_cache(layer, redo):
            fresh_analysis(image_obj, curr_layer, shell, driver, envs)
        # update the master list
        dcom.update_master_list(master_list, layer)
def analyze_first_layer(image_obj, master_list, options):
    """Analyze the first layer of an image.
    Return a Prereqs object for the next layer.
    1. Check if the layer is empty. If it is, return None
    2. See if we can load the layer from cache
    3. If we can't load from cache
       3.1 See if we can find any information about the rootfs
       3.2 If we are able to find any information, use any prescribed
           methods to extract package information
    4. Process and bundle that information into the image object
    5. Return a Prereqs object for subsequent layer processing

    NOTE(review): also returns None (after a warning notice) when the
    layer is not cached and no package-manager binary is found."""
    # set up a notice origin for the first layer
    origin_first_layer = 'Layer {}'.format(image_obj.layers[0].layer_index)
    # check if the layer is empty; an empty layer cannot be analyzed
    if com.is_empty_layer(image_obj.layers[0]):
        logger.warning(errors.empty_layer)
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.empty_layer, 'warning'))
        return None
    # create a Prereqs object to carry shell/binary/path state forward
    prereqs = core.Prereqs()
    # find the shell from the first layer's filesystem
    prereqs.fs_shell = dcom.get_shell(image_obj.layers[0])
    # find a shell for the host as a fallback
    prereqs.host_shell = host.check_shell()
    # warn (but keep going) when neither filesystem nor host has a shell
    if not prereqs.fs_shell and not prereqs.host_shell:
        logger.warning(errors.no_shell)
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.no_shell, 'warning'))
    # find the package-manager binary from the first layer
    prereqs.binary = dcom.get_base_bin(image_obj.layers[0])
    # try to load packages from cache; on a hit we skip extraction entirely
    if not com.load_from_cache(image_obj.layers[0], options.redo):
        # add a notice if there is a "created by"
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_first_layer, Notice(formats.layer_created_by.format(
                created_by=image_obj.layers[0].created_by), 'info'))
        # set a possible OS and package format
        get_os_style(image_obj.layers[0], prereqs.binary)
        # if there is a binary, extract packages
        if prereqs.binary:
            # mount the first layer
            target_dir = prep_first_layer(image_obj.layers[0])
            # set the host path to the mount point
            if target_dir:
                prereqs.host_path = target_dir
            # core default execution on the first layer
            core.execute_base(image_obj.layers[0], prereqs)
        else:
            # no package manager and no cache: nothing can be extracted
            logger.warning(errors.no_package_manager)
            image_obj.layers[0].origins.add_notice_to_origins(
                origin_first_layer, Notice(
                    errors.no_package_manager, 'warning'))
            return None
    # populate the master list with all packages found in the first layer
    for p in image_obj.layers[0].packages:
        master_list.append(p)
    return prereqs
def analyze_first_layer(image_obj, master_list, options):
    """Analyze the image's first layer and return the installed shell.

    Returns None when the layer is empty (no shell can be found).
    Steps:
    1. Bail out on an empty layer.
    2. Record the layer's shell and package-manager binary, warning
       about whichever is missing.
    3. On a cache miss, guess the OS and — when both a shell and a
       binary exist — mount the layer, run the default extraction,
       then unmount.
    4. Copy the layer's packages into master_list.
    5. Return the shell for subsequent layer processing.
    """
    first_layer = image_obj.layers[0]
    # notice origin for all warnings attached to this layer
    origin_first_layer = 'Layer {}'.format(first_layer.layer_index)
    # an empty layer cannot contain a shell
    if com.is_empty_layer(first_layer):
        logger.warning(errors.empty_layer)
        first_layer.origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.empty_layer, 'warning'))
        return None
    # gather the prerequisites for package extraction
    prereqs = core.Prereqs()
    prereqs.shell = dcom.get_shell(first_layer)
    if not prereqs.shell:
        logger.warning(errors.no_shell)
        first_layer.origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.no_shell, 'warning'))
    prereqs.binary = dcom.get_base_bin(first_layer)
    if not prereqs.binary:
        logger.warning(errors.no_package_manager)
        first_layer.origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.no_package_manager, 'warning'))
    # a cache hit means the packages are already on the layer object
    if not com.load_from_cache(first_layer, options.redo):
        # record a guess at the OS
        get_os_style(first_layer, prereqs.binary)
        # extraction needs both a shell and a package manager
        if prereqs.shell and prereqs.binary:
            # mount, run the core default execution, then unmount
            mount_first_layer(first_layer)
            core.execute_base(first_layer, prereqs)
            rootfs.undo_mount()
            rootfs.unmount_rootfs()
    # fold every package found in the first layer into the master list
    master_list.extend(first_layer.packages)
    return prereqs.shell
def analyze_subsequent_layers(image_obj, prereqs, master_list, options):
    """Analyze every layer after the first.

    Assuming the first layer has already been analyzed, for each
    remaining layer:
    1. Skip empty layers (attaching a warning notice).
    2. Load the layer's packages from cache, or run a fresh analysis.
    3. With the 'default' driver, apply the layer's diff regardless.
    4. Fold the layer's results into the master list.
    """
    # the 'default' driver needs the first layer extracted into the
    # merge directory before later layers can be applied on top
    mergepath = os.path.join(rootfs.get_working_dir(), constants.mergedir)
    if not os.listdir(mergepath):
        prep_layers(image_obj, 0, 'default')
    # environment variables apply to every layer's analysis
    prereqs.envs = lock.get_env_vars(image_obj)
    for curr_layer in range(1, len(image_obj.layers)):
        layer = image_obj.layers[curr_layer]
        # track work_dir changes so base.yml commands can later be
        # executed from the work_dir
        if layer.get_layer_workdir():
            prereqs.layer_workdir = layer.get_layer_workdir()
        # notice origin for warnings attached to this layer
        origin_next_layer = 'Layer {}'.format(layer.layer_index)
        # nothing to analyze in an empty layer
        if common.is_empty_layer(layer):
            logger.warning(errors.empty_layer)
            layer.origins.add_notice_to_origins(
                origin_next_layer, Notice(errors.empty_layer, 'warning'))
            continue
        # analyze only on a cache miss
        if not common.load_from_cache(layer, options.redo):
            fresh_analysis(image_obj, curr_layer, prereqs, options)
        # the 'default' driver applies the current layer either way
        if options.driver == 'default':
            apply_layers(image_obj, curr_layer)
        # update the master list
        dcom.update_master_list(master_list, layer)
def analyze_subsequent_layers(
        image_obj, shell, master_list, redo, dfobj=None,  # noqa: R0912,R0913
        dfile_lock=False, driver=None):
    """Analyze every layer after the first, optionally pinning packages
    into a locked dockerfile.

    For each layer: skip empty layers; pick up a shell from the layer
    if none was found earlier; on a cache miss, replay the commands
    that created the layer against an overlay mount of layers 0..N and
    extract packages; finally fold results into master_list.

    NOTE(review): dfobj is only read when dfile_lock is truthy —
    presumably a parsed Dockerfile object; confirm against callers."""
    # get packages for subsequent layers
    curr_layer = 1
    # work_dir tracks the most recent WORKDIR seen across layers so that
    # extraction commands run from the right directory
    work_dir = None
    while curr_layer < len(image_obj.layers):  # pylint:disable=too-many-nested-blocks
        # make a notice for each layer
        origin_next_layer = 'Layer {}'.format(
            image_obj.layers[curr_layer].layer_index)
        # check if this is an empty layer
        if common.is_empty_layer(image_obj.layers[curr_layer]):
            # we continue to the next layer
            logger.warning(errors.empty_layer)
            image_obj.layers[curr_layer].origins.add_notice_to_origins(
                origin_next_layer, Notice(errors.empty_layer, 'warning'))
            curr_layer = curr_layer + 1
            continue
        # If workdir changes, update value accordingly
        # so we can later execute base.yml commands from the workdir.
        if image_obj.layers[curr_layer].get_layer_workdir() is not None:
            work_dir = image_obj.layers[curr_layer].get_layer_workdir()
        # if there is no shell, try to see if it exists in the current layer
        if not shell:
            shell = common.get_shell(image_obj.layers[curr_layer])
        if not common.load_from_cache(image_obj.layers[curr_layer], redo):
            # get commands that created the layer
            # for docker images this is retrieved from the image history
            command_list = dhelper.get_commands_from_history(
                image_obj.layers[curr_layer])
            if command_list:
                # mount diff layers from 0 till the current layer
                target = mount_overlay_fs(image_obj, curr_layer, driver)
                # mount dev, sys and proc after mounting diff layers
                rootfs.prep_rootfs(target)
            # for each command look up the snippet library
            for command in command_list:
                pkg_listing = command_lib.get_package_listing(command.name)
                # get list of environment variables
                envs = dhelper.get_env_vars(image_obj)
                # a string listing means a single base-package invocation;
                # anything else is handled as a snippet listing
                if isinstance(pkg_listing, str):
                    try:
                        common.add_base_packages(image_obj.layers[curr_layer],
                                                 pkg_listing, shell,
                                                 work_dir, envs)
                    except KeyboardInterrupt:
                        # abort cleanly so mounts are not left dangling
                        logger.critical(errors.keyboard_interrupt)
                        abort_analysis()
                else:
                    try:
                        common.add_snippet_packages(
                            image_obj.layers[curr_layer], command, pkg_listing,
                            shell, work_dir, envs)
                    except KeyboardInterrupt:
                        logger.critical(errors.keyboard_interrupt)
                        abort_analysis()
                # pin any installed packages to a locked dockerfile.
                if dfile_lock:
                    # collect list of RUN commands that could install pkgs
                    run_dict = d_file.get_run_layers(dfobj)
                    # use the run_dict to get list of packages being installed
                    # (curr_layer - 1: run_dict indexes RUN commands, which
                    # start at the second image layer)
                    install_list = d_file.get_install_packages(
                        run_dict[curr_layer - 1])
                    for install_pkg in install_list:
                        for layer_pkg in image_obj.layers[curr_layer].packages:
                            if install_pkg == layer_pkg.name:
                                # dockerfile package in layer, let's pin it
                                d_file.expand_package(
                                    run_dict[curr_layer - 1], install_pkg,
                                    layer_pkg.version,
                                    command_lib.check_pinning_separator(
                                        pkg_listing))
            if command_list:
                # tear down the mounts created above
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1