Example #1
def analyze_first_layer(image_obj, master_list, redo):
    # find the binary and shell by mounting the base layer
    target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
    binary = common.get_base_bin()
    shell = get_shell(image_obj, binary)
    # set up a notice origin for the first layer
    origin_first_layer = 'Layer: ' + image_obj.layers[0].fs_hash[:10]
    # only extract packages if there is a known binary and the layer is not
    # cached
    if binary:
        if not common.load_from_cache(image_obj.layers[0], redo):
            # Determine package/os style from the binary in the image layer
            common.get_os_style(image_obj.layers[0], binary)
            # get the packages of the first layer
            rootfs.prep_rootfs(target)
            common.add_base_packages(image_obj.layers[0], binary, shell)
            # unmount proc, sys and dev
            rootfs.undo_mount()
    else:
        logger.warning(errors.no_package_manager)
        # /etc/os-release may still be present even if binary is not
        common.get_os_style(image_obj.layers[0], None)
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_first_layer, Notice(errors.no_package_manager, 'warning'))
        # no binary means there is no shell so set to default shell
        logger.warning('Unknown filesystem. Using default shell')
        shell = constants.shell
    # unmount the first layer
    rootfs.unmount_rootfs()
    # populate the master list with all packages found in the first layer
    for p in image_obj.layers[0].packages:
        master_list.append(p)
    return shell
Example #2
def analyze_subsequent_layers(image_obj, shell, master_list, redo):
    # get packages for subsequent layers
    curr_layer = 1
    while curr_layer < len(image_obj.layers):
        if not common.load_from_cache(image_obj.layers[curr_layer], redo):
            # get commands that created the layer
            # for docker images this is retrieved from the image history
            command_list = dhelper.get_commands_from_history(
                image_obj.layers[curr_layer])
            if command_list:
                # mount diff layers from 0 till the current layer
                target = mount_overlay_fs(image_obj, curr_layer)
                # mount dev, sys and proc after mounting diff layers
                rootfs.prep_rootfs(target)
            # for each command look up the snippet library
            for command in command_list:
                pkg_listing = command_lib.get_package_listing(command.name)
                if isinstance(pkg_listing, str):
                    common.add_base_packages(image_obj.layers[curr_layer],
                                             pkg_listing, shell)
                else:
                    common.add_snippet_packages(image_obj.layers[curr_layer],
                                                command, pkg_listing, shell)
            if command_list:
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1
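A minimal sketch of how a caller might chain the two helpers above, modeled on analyze_docker_image in Example #14; the wrapper name and its placement are assumptions for illustration only.

def analyze_image(image_obj, redo=False):
    # hypothetical driver: the first layer determines the shell used for
    # every later layer, and master_list is extended in place
    master_list = []
    shell = analyze_first_layer(image_obj, master_list, redo)
    analyze_subsequent_layers(image_obj, shell, master_list, redo)
    # persist the results, as the full analysis in Example #14 does
    common.save_to_cache(image_obj)
    return master_list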
Example #3
def analyze_subsequent_layers(
        image_obj,
        shell,
        master_list,
        redo,
        dfobj=None,  # noqa: R0912,R0913
        dfile_lock=False):
    # get packages for subsequent layers
    curr_layer = 1
    while curr_layer < len(image_obj.layers):  # pylint:disable=too-many-nested-blocks
        # if there is no shell, try to see if it exists in the current layer
        if not shell:
            shell = common.get_shell(image_obj.layers[curr_layer])
        if not common.load_from_cache(image_obj.layers[curr_layer], redo):
            # get commands that created the layer
            # for docker images this is retrieved from the image history
            command_list = dhelper.get_commands_from_history(
                image_obj.layers[curr_layer])
            if command_list:
                # mount diff layers from 0 till the current layer
                target = mount_overlay_fs(image_obj, curr_layer)
                # mount dev, sys and proc after mounting diff layers
                rootfs.prep_rootfs(target)
            # for each command look up the snippet library
            for command in command_list:
                pkg_listing = command_lib.get_package_listing(command.name)
                if isinstance(pkg_listing, str):
                    try:
                        common.add_base_packages(image_obj.layers[curr_layer],
                                                 pkg_listing, shell)
                    except KeyboardInterrupt:
                        logger.critical(errors.keyboard_interrupt)
                        abort_analysis()
                else:
                    try:
                        common.add_snippet_packages(
                            image_obj.layers[curr_layer], command, pkg_listing,
                            shell)
                    except KeyboardInterrupt:
                        logger.critical(errors.keyboard_interrupt)
                        abort_analysis()
                # pin any installed packages to a locked dockerfile.
                if dfile_lock:
                    # collect list of RUN commands that could install pkgs
                    run_dict = d_file.get_run_layers(dfobj)
                    for package in image_obj.layers[curr_layer].packages:
                        # check that package is in current dfobj RUN line
                        if d_file.package_in_dockerfile(
                                run_dict[curr_layer - 1], package.name):
                            d_file.expand_package(
                                run_dict[curr_layer - 1], package.name,
                                package.version,
                                command_lib.check_pinning_separator(
                                    pkg_listing))
            if command_list:
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1
Example #4
def analyze_first_layer(image_obj, master_list, redo):
    # find the binary of the first layer
    binary = common.get_base_bin(image_obj.layers[0])
    # see if there is an associated shell
    # if there is no binary, this will be set to the default shell
    shell = get_shell(image_obj, binary)
    # try to load packages from cache
    if not common.load_from_cache(image_obj.layers[0], redo):
        # set a possible OS
        common.get_os_style(image_obj.layers[0], binary)
        # set up a notice origin for the first layer
        origin_first_layer = 'Layer: ' + image_obj.layers[0].fs_hash[:10]
        # if there is a binary, extract packages
        if binary:
            try:
                target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
                rootfs.prep_rootfs(target)
                common.add_base_packages(image_obj.layers[0], binary, shell)
                # unmount proc, sys and dev
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
            except KeyboardInterrupt:
                logger.critical(errors.keyboard_interrupt)
                abort_analysis()
        else:
            logger.warning(errors.no_package_manager)
            image_obj.layers[0].origins.add_notice_to_origins(
                origin_first_layer, Notice(errors.no_package_manager,
                                           'warning'))
    # populate the master list with all packages found in the first layer
    for p in image_obj.layers[0].packages:
        master_list.append(p)
    return shell
Example #5
def fresh_analysis(image_obj, curr_layer, shell, driver, envs):
    """This is a subroutine that is run if there is no chached results or if
    the user wants to redo the analysis
    1 Check if we have a shell, if not then see if we can find a shell
    2 Check if we can find any info in the container history (created_by)
    3 If this exists then check if we can parse the command to figure out
      what package managers were used.
    4 Use the prescribed methods for the package managers to retrieve
    """
    # if there is no shell, try to see if it exists in the current layer
    if not shell:
        shell = dcom.get_shell(image_obj.layers[curr_layer])
    # get commands that created the layer
    # for docker images this is retrieved from the image history
    command_list = dcom.get_commands_from_metadata(
        image_obj.layers[curr_layer])
    if command_list:
        # mount diff layers from 0 till the current layer
        target = mount_overlay_fs(image_obj, curr_layer, driver)
        # mount dev, sys and proc after mounting diff layers
        rootfs.prep_rootfs(target)
        # for each command look up the snippet library
        for command in command_list:
            pkg_listing = command_lib.get_package_listing(command.name)
            if isinstance(pkg_listing, str):
                core.execute_base(image_obj.layers[curr_layer], shell,
                                  pkg_listing, envs)
            else:
                core.execute_snippets(image_obj.layers[curr_layer], command,
                                      pkg_listing, shell, envs)
        rootfs.undo_mount()
        rootfs.unmount_rootfs()
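A hedged sketch of the per-layer loop that could drive fresh_analysis above; the cache check mirrors Examples #2 and #15, while the wrapper itself and the way envs is obtained are assumptions.

def analyze_remaining_layers(image_obj, shell, driver=None, redo=False):
    # hypothetical driver: re-analyze a layer only when the cache misses
    # or the user asked for a redo
    envs = dhelper.get_env_vars(image_obj)  # as in Example #15
    curr_layer = 1
    while curr_layer < len(image_obj.layers):
        if not common.load_from_cache(image_obj.layers[curr_layer], redo):
            fresh_analysis(image_obj, curr_layer, shell, driver, envs)
        curr_layer += 1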
Example #6
def drop_into_layer(image_obj, layer_index):
    """Given the image object and the layer index, mount all the layers
    up to the specified layer index and drop into a shell session"""
    rootfs.set_up()
    if layer_index == 0:
        # mount only one layer
        target = rootfs.mount_base_layer(
            image_obj.layers[layer_index].tar_file)
    else:
        # mount all layers until the provided layer index
        target = multi_layer.mount_overlay_fs(image_obj, layer_index)
    mount_path = get_mount_path()
    print("\nWorking directory is: {}\n".format(mount_path))
    # check if there is a shell
    shell = check_shell()
    if shell:
        rootfs.prep_rootfs(target)
        print("\nRun 'cd {} && sudo chroot . {}' to look around".format(
            mount_path, shell))
    else:
        print("\nRun 'cd {}' to look around".format(mount_path))
        print("A shell binary doesn't exist in the filesystem. You're on "
              "your own.")
    print("\nAfter exiting from your session, run 'cd -' to go back "
          "and 'tern debug --recover' to clean up.\n")
    sys.exit(0)
Example #7
def mount_container_image(image_obj, driver=None):
    """Mount the container image to make it ready to invoke scripts"""
    if len(image_obj.layers) > 1:
        target = multi_layer.prep_layers(image_obj,
                                         len(image_obj.layers) - 1, driver)
        rootfs.prep_rootfs(target)
    else:
        single_layer.prep_first_layer(image_obj.layers[0])
Example #8
def mount_overlay_fs(image_obj, top_layer):
    '''Given the image object and the topmost layer, mount all the layers
    until the top layer using overlayfs'''
    tar_layers = []
    for index in range(0, top_layer + 1):
        tar_layers.append(image_obj.layers[index].tar_file)
    target = rootfs.mount_diff_layers(tar_layers)
    # mount dev, sys and proc after mounting diff layers
    rootfs.prep_rootfs(target)
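The overlay mounted above has to be unmounted explicitly once the chroot work is done; a hedged sketch of the pairing, based on the undo_mount and unmount_rootfs calls that follow mount_overlay_fs in Examples #2 and #14.

# illustrative only: mount up to the current layer, run the package-listing
# commands inside the chroot, then tear the mounts down in reverse order
mount_overlay_fs(image_obj, curr_layer)
try:
    pass  # chroot work (e.g. common.add_base_packages) goes here
finally:
    # unmount proc, sys and dev, then the overlay itself
    rootfs.undo_mount()
    rootfs.unmount_rootfs()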
Example #9
def mount_first_layer(layer_obj):
    try:
        target = rootfs.mount_base_layer(layer_obj.tar_file)
        rootfs.prep_rootfs(target)
        return target
    except subprocess.CalledProcessError as e:  # nosec
        logger.critical("Cannot mount filesystem and/or device nodes: %s", e)
        dcom.abort_analysis()
    except KeyboardInterrupt:
        logger.critical(errors.keyboard_interrupt)
        dcom.abort_analysis()
Example #10
def execute_base_layer(base_layer, binary, shell):
    '''Execute retrieving base layer packages'''
    try:
        target = rootfs.mount_base_layer(base_layer.tar_file)
        rootfs.prep_rootfs(target)
        common.add_base_packages(base_layer, binary, shell)
    except KeyboardInterrupt:
        logger.critical(errors.keyboard_interrupt)
        abort_analysis()
    finally:
        # unmount proc, sys and dev
        rootfs.undo_mount()
        rootfs.unmount_rootfs()
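Example #10 keeps the teardown in a finally block so the bind mounts are released even on a keyboard interrupt; a hedged sketch of a call site, assuming the binary and shell are resolved first (get_base_bin with a layer argument as in Example #4, get_image_shell as in Example #14).

# hypothetical call site for execute_base_layer
base_layer = image_obj.layers[0]
binary = common.get_base_bin(base_layer)
shell, _ = command_lib.get_image_shell(command_lib.get_base_listing(binary))
# fall back to the default shell if the command library has no listing
execute_base_layer(base_layer, binary, shell or constants.shell)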
Example #11
def fresh_analysis(image_obj, curr_layer, prereqs, options):
    """This is a subroutine that is run if there is no chached results or if
    the user wants to redo the analysis
    1 Check if we have a shell, if not then see if we can find a shell
    2 Check if we can find any info in the container history (created_by)
    3 If this exists then check if we can parse the command to figure out
      what package managers were used.
    4 Use the prescribed methods for the package managers to retrieve
    """
    # set up a notice origin for the current layer
    origin_curr_layer = 'Layer {}'.format(
        image_obj.layers[curr_layer].layer_index)
    image_obj.layers[curr_layer].origins.add_notice_to_origins(
        origin_curr_layer, Notice(formats.layer_created_by.format(
            created_by=image_obj.layers[curr_layer].created_by), 'info'))
    # if there is no shell, try to see if it exists in the current layer
    if not prereqs.fs_shell:
        prereqs.fs_shell = dcom.get_shell(image_obj.layers[curr_layer])
    # mount diff layers from 0 till the current layer
    target = mount_overlay_fs(image_obj, curr_layer, options.driver)
    # set this layer's host path
    prereqs.host_path = target
    # mount dev, sys and proc after mounting diff layers
    rootfs.prep_rootfs(target)
    # get commands that created the layer
    # for docker images this is retrieved from the image history
    command_list = dcom.get_commands_from_metadata(
        image_obj.layers[curr_layer])
    if command_list:
        # for each command look up the snippet library
        for command in command_list:
            pkg_listing = command_lib.get_package_listing(command.name)
            if isinstance(pkg_listing, str):
                prereqs.binary = pkg_listing
                core.execute_base(
                    image_obj.layers[curr_layer], prereqs)
            else:
                prereqs.listing = pkg_listing
                core.execute_snippets(
                    image_obj.layers[curr_layer], command, prereqs)
    else:
        # fall back to executing what we know
        core.execute_base(image_obj.layers[curr_layer], prereqs)
    rootfs.undo_mount()
    rootfs.unmount_rootfs()
Example #12
def drop_into_layer(image_obj, layer_index):
    """Given the image object and the layer index, mount all the layers
    up to the specified layer index and drop into a shell session"""
    rootfs.set_up()
    if layer_index == 0:
        # mount only one layer
        target = rootfs.mount_base_layer(
            image_obj.layers[layer_index].tar_file)
    else:
        # mount all layers until the provided layer index
        target = analyze.mount_overlay_fs(image_obj, layer_index)
    # check if there is a shell
    shell = check_shell()
    if shell:
        rootfs.prep_rootfs(target)
        print("Done. Run 'sudo chroot . {}' to look around.".format(shell))
    else:
        print("A shell binary doesn't exist in the filesystem. You're on "
              "your own.")
    print("Working directory is: {}".format(get_mount_path()))
    sys.exit(0)
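Both drop_into_layer variants (Examples #6 and #12) assume the image has already been loaded and the mount directory configured; a hedged setup sketch modeled on the fragment in Example #13 (the image name is a placeholder).

# illustrative setup before dropping into, say, layer 2 of an image
container.check_docker_setup()
rootfs.set_mount_dir()
image_obj = report.load_full_image('debian:buster')
if image_obj.origins.is_empty():
    # image loading was successful
    drop_into_layer(image_obj, 2)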
Example #13
 container.check_docker_setup()
 # set some global variables
 rootfs.set_mount_dir()
 # try to load the image
 image_obj = report.load_full_image(args.image)
 if image_obj.origins.is_empty():
     # image loading was successful
     # proceed mounting diff filesystems
     rootfs.set_up()
     if len(image_obj.layers) == 1:
         # mount only one layer
         target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
     else:
         target = analyze.mount_overlay_fs(image_obj,
                                           len(image_obj.layers) - 1)
     rootfs.prep_rootfs(target)
     # invoke commands in chroot
     # if we're looking up the snippets library
     # we should see 'snippets' in the keys
     if 'snippets' in args.keys and 'packages' in args.keys:
         # get the package info that corresponds to the package name
         # or get the default
         last = args.keys.pop()
         info_list = look_up_lib(args.keys)
         info_dict = command_lib.check_for_unique_package(
             info_list, args.package)[last]
     else:
         info_dict = look_up_lib(args.keys)
     # try to invoke the commands
     try:
         result = command_lib.get_pkg_attr_list(args.shell, info_dict,
Example #14
def analyze_docker_image(image_obj, redo=False, dockerfile=False):  # pylint: disable=too-many-locals
    '''Given a DockerImage object, retrieve the packages for each layer: first
    look them up in the cache and, if they are not there, consult the command
    library. For a command library lookup, first mount the filesystem and then
    run the commands from the library in a chroot'''
    # find the layers that are imported
    if dockerfile:
        dhelper.set_imported_layers(image_obj)
    # add notices for each layer if it is imported
    image_setup(image_obj)
    shell = ''
    # set up empty master list of packages
    master_list = []
    # find the binary by mounting the base layer
    target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
    binary = common.get_base_bin()
    # set up a notice origin referring to the base command library listing
    origin_command_lib = formats.invoking_base_commands
    # set up a notice origin for the first layer
    origin_first_layer = 'Layer: ' + image_obj.layers[0].fs_hash[:10]
    # find the shell to invoke commands in
    shell, _ = command_lib.get_image_shell(
        command_lib.get_base_listing(binary))
    if not shell:
        # add a warning notice for no shell in the command library
        logger.warning('No shell listing in command library. '
                       'Using default shell')
        no_shell_message = errors.no_shell_listing.format(
            binary=binary, default_shell=constants.shell)
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_command_lib, Notice(no_shell_message, 'warning'))
        # add a hint notice to add the shell to the command library
        add_shell_message = errors.no_listing_for_base_key.format(
            listing_key='shell')
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_command_lib, Notice(add_shell_message, 'hint'))
        shell = constants.shell
    # only extract packages if there is a known binary and the layer is not
    # cached
    if binary:
        if not common.load_from_cache(image_obj.layers[0], redo):
            # get the packages of the first layer
            rootfs.prep_rootfs(target)
            common.add_base_packages(image_obj.layers[0], binary, shell)
            # unmount proc, sys and dev
            rootfs.undo_mount()
    else:
        no_base = errors.unrecognized_base.format(
            image_name=image_obj.name, image_tag=image_obj.tag)
        logger.warning(no_base)
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_first_layer, Notice(no_base, 'warning'))
        # no binary means there is no shell so set to default shell
        logger.warning('Unknown filesystem. Using default shell')
        shell = constants.shell
    # unmount the first layer
    rootfs.unmount_rootfs()
    # populate the master list with all packages found in the first layer
    for p in image_obj.layers[0].packages:
        master_list.append(p)
    # get packages for subsequent layers
    curr_layer = 1
    while curr_layer < len(image_obj.layers):
        if not common.load_from_cache(image_obj.layers[curr_layer], redo):
            # get commands that created the layer
            # for docker images this is retrieved from the image history
            command_list = dhelper.get_commands_from_history(
                image_obj.layers[curr_layer])
            if command_list:
                # mount diff layers from 0 till the current layer
                target = mount_overlay_fs(image_obj, curr_layer)
                # mount dev, sys and proc after mounting diff layers
                rootfs.prep_rootfs(target)
            # for each command look up the snippet library
            for command in command_list:
                pkg_listing = command_lib.get_package_listing(command.name)
                if isinstance(pkg_listing, str):
                    common.add_base_packages(
                        image_obj.layers[curr_layer], pkg_listing, shell)
                else:
                    common.add_snippet_packages(
                        image_obj.layers[curr_layer], command, pkg_listing,
                        shell)
            if command_list:
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1
    common.save_to_cache(image_obj)
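A hedged sketch of driving analyze_docker_image end to end, reusing the setup calls from Example #13; the image name is a placeholder and the final loop only illustrates where the results end up (on each layer's packages list).

# illustrative driver for the full analysis in Example #14
container.check_docker_setup()
rootfs.set_mount_dir()
image_obj = report.load_full_image('alpine:3.9')
if image_obj.origins.is_empty():
    # image loading was successful; prepare the working directories
    rootfs.set_up()
    analyze_docker_image(image_obj)
    for layer in image_obj.layers:
        print(layer.fs_hash[:10], len(layer.packages))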
Example #15
def analyze_subsequent_layers(
        image_obj,
        shell,
        master_list,
        redo,
        dfobj=None,  # noqa: R0912,R0913
        dfile_lock=False,
        driver=None):
    # get packages for subsequent layers
    curr_layer = 1
    work_dir = None
    while curr_layer < len(image_obj.layers):  # pylint:disable=too-many-nested-blocks
        # make a notice for each layer
        origin_next_layer = 'Layer {}'.format(
            image_obj.layers[curr_layer].layer_index)
        # check if this is an empty layer
        if common.is_empty_layer(image_obj.layers[curr_layer]):
            # we continue to the next layer
            logger.warning(errors.empty_layer)
            image_obj.layers[curr_layer].origins.add_notice_to_origins(
                origin_next_layer, Notice(errors.empty_layer, 'warning'))
            curr_layer = curr_layer + 1
            continue
        # If workdir changes, update value accordingly
        # so we can later execute base.yml commands from the workdir.
        if image_obj.layers[curr_layer].get_layer_workdir() is not None:
            work_dir = image_obj.layers[curr_layer].get_layer_workdir()
        # if there is no shell, try to see if it exists in the current layer
        if not shell:
            shell = common.get_shell(image_obj.layers[curr_layer])
        if not common.load_from_cache(image_obj.layers[curr_layer], redo):
            # get commands that created the layer
            # for docker images this is retrieved from the image history
            command_list = dhelper.get_commands_from_history(
                image_obj.layers[curr_layer])
            if command_list:
                # mount diff layers from 0 till the current layer
                target = mount_overlay_fs(image_obj, curr_layer, driver)
                # mount dev, sys and proc after mounting diff layers
                rootfs.prep_rootfs(target)
            # for each command look up the snippet library
            for command in command_list:
                pkg_listing = command_lib.get_package_listing(command.name)
                # get list of environment variables
                envs = dhelper.get_env_vars(image_obj)
                if isinstance(pkg_listing, str):
                    try:
                        common.add_base_packages(image_obj.layers[curr_layer],
                                                 pkg_listing, shell, work_dir,
                                                 envs)
                    except KeyboardInterrupt:
                        logger.critical(errors.keyboard_interrupt)
                        abort_analysis()
                else:
                    try:
                        common.add_snippet_packages(
                            image_obj.layers[curr_layer], command, pkg_listing,
                            shell, work_dir, envs)
                    except KeyboardInterrupt:
                        logger.critical(errors.keyboard_interrupt)
                        abort_analysis()
                # pin any installed packages to a locked dockerfile.
                if dfile_lock:
                    # collect list of RUN commands that could install pkgs
                    run_dict = d_file.get_run_layers(dfobj)
                    # use the run_dict to get list of packages being installed
                    install_list = d_file.get_install_packages(
                        run_dict[curr_layer - 1])
                    for install_pkg in install_list:
                        for layer_pkg in image_obj.layers[curr_layer].packages:
                            if install_pkg == layer_pkg.name:
                                # dockerfile package in layer, let's pin it
                                d_file.expand_package(
                                    run_dict[curr_layer - 1], install_pkg,
                                    layer_pkg.version,
                                    command_lib.check_pinning_separator(
                                        pkg_listing))
            if command_list:
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1