Code example #1
File: report.py  Project: vladn90/tern
def execute_dockerfile(args):
    '''Execution path if given a dockerfile'''
    logger.debug('Setting up...')
    setup(args.dockerfile)
    dockerfile_parse = False
    # try to get Docker base image metadata
    logger.debug('Loading base image...')
    base_image = load_base_image()
    logger.debug('Base image loaded...')
    # check if the base image added any notices
    if base_image.origins.is_empty():
        # load any packages from cache
        logger.debug('Looking up cache for base image layers...')
        if not common.load_from_cache(base_image):
            # load any packages using the command library
            logger.debug('Retrieving metadata using scripts from base.yml')
            container.start_container(base_image.repotag)
            common.add_base_packages(base_image)
            container.remove_container()
            logger.debug('Saving base image layers...')
            common.save_to_cache(base_image)
            cache.save()
        # attempt to get the packages for the rest of the image
        # since we only have a dockerfile, we will attempt to build the
        # image first
        # This step really belongs at the beginning, but since there is
        # no way of tracking imported images from within the docker image
        # history, we build after importing the base image
        shell, msg = cmdlib.get_image_shell(
            cmdlib.get_base_listing(base_image.name, base_image.tag))
        if not shell:
            shell = constants.shell
        logger.debug('Building image...')
        build, msg = docker.is_build()
        if build:
            # attempt to get built image metadata
            full_image = load_full_image()
            if full_image.origins.is_empty():
                # link layer to imported base image
                full_image.set_image_import(base_image)
                if not common.load_from_cache(full_image):
                    # find packages per layer
                    container.start_container(full_image.repotag)
                    logger.debug('Retrieving metadata using scripts from '
                                 'snippets.yml')
                    docker.add_packages_from_history(full_image, shell)
                    container.remove_container()
                    # record missing layers in the cache
                    common.save_to_cache(full_image)
                    cache.save()
                logger.debug('Cleaning up...')
                container.remove_image(full_image.repotag)
                container.remove_image(base_image.repotag)
                generate_report(args, full_image)
            else:
                # we cannot extract the built image's metadata
                dockerfile_parse = True
        else:
            # we cannot build the image
            common.save_to_cache(base_image)
            dockerfile_parse = True
    else:
        # something went wrong in getting the base image
        dockerfile_parse = True
    # check if the dockerfile needs to be parsed
    if dockerfile_parse:
        cache.save()
        logger.debug('Cleaning up...')
        container.remove_image(base_image.repotag)
        logger.debug('Parsing Dockerfile to generate report...')
        stub_image = get_dockerfile_packages()
        generate_report(args, base_image, stub_image)
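
In this excerpt, execute_dockerfile reads only args.dockerfile directly (generate_report may read further attributes from args). A minimal, hypothetical driver could therefore look like the sketch below; the import path and the CLI argument name are assumptions for illustration, not taken from the project:

# Hypothetical driver for execute_dockerfile; the import path and the CLI
# argument are assumptions made for this sketch, not tern's actual CLI.
import argparse

from report import execute_dockerfile  # assumed import path for report.py


def main():
    parser = argparse.ArgumentParser(
        description='Analyze a Dockerfile and generate a report')
    # args.dockerfile is the attribute execute_dockerfile passes to setup()
    parser.add_argument('dockerfile', help='path to the Dockerfile to analyze')
    args = parser.parse_args()
    execute_dockerfile(args)


if __name__ == '__main__':
    main()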
Code example #2
File: report.py  Project: xtreme-vikram-yadav/tern
def analyze_docker_image(image_obj, dockerfile=False):
    '''Given a DockerImage object, retrieve the packages for each layer,
    looking first in the cache and, if not found there, in the command
    library. For a command library lookup, first mount the filesystem and
    then run the commands from the command library in a chroot'''
    # find the layers that are imported
    if dockerfile:
        docker.set_imported_layers(image_obj)
    # add notices for each layer if it is imported
    image_setup(image_obj)
    shell = ''
    # set up empty master list of package names
    master_list = []
    # find the binary by mounting the base layer
    target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
    binary = common.get_base_bin(image_obj.layers[0])
    # set up a notice origin referring to the base command library listing
    origin_command_lib = formats.invoking_base_commands
    # find the shell to invoke commands in
    shell, msg = command_lib.get_image_shell(
        command_lib.get_base_listing(binary))
    if not shell:
        # add a warning notice for no shell in the command library
        logger.warning('No shell listing in command library. '
                       'Using default shell')
        no_shell_message = errors.no_shell_listing.format(
            binary, default_shell=constants.shell)
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_command_lib, Notice(no_shell_message, 'warning'))
        # add a hint notice to add the shell to the command library
        add_shell_message = errors.no_listing_for_base_key.format(
            listing_key='shell')
        image_obj.layers[0].origins.add_notice_to_origins(
            origin_command_lib, Notice(add_shell_message, 'hint'))
        shell = constants.shell
    # only extract packages if there is a known binary and the layer is not
    # cached
    if binary:
        if not common.load_from_cache(image_obj.layers[0]):
            # get the packages of the first layer
            rootfs.prep_rootfs(target)
            common.add_base_packages(image_obj.layers[0], binary, shell)
            # unmount proc, sys and dev
            rootfs.undo_mount()
    else:
        logger.warning(errors.unrecognized_base.format(
            image_name=image_obj.name, image_tag=image_obj.tag))
    # unmount the first layer
    rootfs.unmount_rootfs()
    # populate the master list with all packages found in the first layer
    # can't use assignment as that will just point to the image object's layer
    for p in image_obj.layers[0].get_package_names():
        master_list.append(p)
    # get packages for subsequent layers
    curr_layer = 1
    while curr_layer < len(image_obj.layers):
        if not common.load_from_cache(image_obj.layers[curr_layer]):
            # get commands that created the layer
            # for docker images this is retrieved from the image history
            command_list = docker.get_commands_from_history(
                image_obj.layers[curr_layer])
            if command_list:
                # mount diff layers from 0 till the current layer
                mount_overlay_fs(image_obj, curr_layer)
            # for each command look up the snippet library
            for command in command_list:
                pkg_listing = command_lib.get_package_listing(command.name)
                if type(pkg_listing) is str:
                    common.add_base_packages(
                        image_obj.layers[curr_layer], pkg_listing, shell)
                else:
                    common.add_snippet_packages(
                        image_obj.layers[curr_layer], command, pkg_listing,
                        shell)
            if command_list:
                rootfs.undo_mount()
                rootfs.unmount_rootfs()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1
    common.save_to_cache(image_obj)
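
The comment above about not using assignment to populate master_list is about Python's reference semantics: assigning the layer's package-name list to master_list would alias the layer object's own list, so later updates would mutate the layer. A short, standalone illustration (the package names are made up):

# Why the loop copies element by element instead of assigning:
# assignment aliases the layer's own list, so mutating the master list
# would mutate the layer object as well. Package names here are made up.
layer_packages = ['bash', 'coreutils']

aliased = layer_packages          # both names refer to the same list object
aliased.append('curl')
print(layer_packages)             # ['bash', 'coreutils', 'curl'] -- layer changed too

copied = []
for p in layer_packages:          # element-by-element copy, as in the code above
    copied.append(p)
copied.append('wget')
print(layer_packages)             # unchanged: ['bash', 'coreutils', 'curl']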
Code example #3
File: report.py  Project: johnmm/tern
def analyze_docker_image(image_obj, dockerfile=False):
    '''Given a DockerImage object, retrieve the packages for each layer,
    looking first in the cache and, if not found there, in the command
    library. For a command library lookup, first mount the filesystem and
    then run the commands from the command library in a chroot'''
    # find the layers that are imported
    if dockerfile:
        docker.set_imported_layers(image_obj)
    # add notices for each layer if it is imported
    for layer in image_obj.layers:
        origin_str = 'Layer: ' + layer.diff_id[:10]
        layer.origins.add_notice_origin(origin_str)
        if layer.import_str:
            layer.origins.add_notice_to_origins(
                origin_str,
                Notice('Imported in Dockerfile using: ' + layer.import_str,
                       'info'))
    shell = ''
    # set the layer that is mounted. In the beginning this is 0
    mounted = 0
    # set up empty master list of package names
    master_list = []
    # find the shell by mounting the base layer
    target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
    binary = common.get_base_bin(image_obj.layers[0])
    # find the shell to invoke commands in
    shell, _ = command_lib.get_image_shell(
        command_lib.get_base_listing(binary))
    if not shell:
        shell = constants.shell
    # only extract packages if there is a known binary and the layer is not
    # cached
    if binary:
        if not common.load_from_cache(image_obj.layers[0]):
            # get the packages of the first layer
            rootfs.prep_rootfs(target)
            common.add_base_packages(image_obj.layers[0], binary)
            # unmount proc, sys and dev
            rootfs.undo_mount()
    else:
        logger.warning(
            errors.unrecognized_base.format(image_name=image_obj.name,
                                            image_tag=image_obj.tag))
    # populate the master list with all packages found in the first layer
    # can't use assignment as that will just point to the image object's layer
    for p in image_obj.layers[0].get_package_names():
        master_list.append(p)
    # get packages for subsequent layers
    curr_layer = 1
    while curr_layer < len(image_obj.layers):
        if not common.load_from_cache(image_obj.layers[curr_layer]):
            # mount from the layer after the mounted layer till the current
            # layer
            for index in range(mounted + 1, curr_layer + 1):
                target = rootfs.mount_diff_layer(
                    image_obj.layers[index].tar_file)
            mounted = curr_layer
            # mount dev, sys and proc after mounting diff layers
            rootfs.prep_rootfs(target)
            docker.add_packages_from_history(image_obj.layers[curr_layer],
                                             shell)
            rootfs.undo_mount()
        # update the master list
        common.update_master_list(master_list, image_obj.layers[curr_layer])
        curr_layer = curr_layer + 1
    # undo all the mounts
    rootfs.unmount_rootfs(mounted + 1)
    common.save_to_cache(image_obj)
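
This variant keeps a mounted index so that, on each cache miss, only the diff layers added since the previous miss are mounted, and rootfs.unmount_rootfs(mounted + 1) tears everything down once at the end. The bookkeeping in isolation might look like the hypothetical sketch below; mount_base, mount_diff_layer, is_cached and unmount_all are stand-ins, not tern's rootfs API:

# Illustrative sketch of the incremental mount bookkeeping used above.
# mount_base, mount_diff_layer, is_cached and unmount_all are stand-ins,
# not tern's rootfs API.
def walk_layers(layers, mount_base, mount_diff_layer, is_cached, unmount_all):
    mount_base(layers[0])           # the base layer is mounted up front
    mounted = 0                     # index of the highest layer mounted so far
    for curr_layer in range(1, len(layers)):
        if is_cached(layers[curr_layer]):
            continue                # cached layers never trigger a mount
        # mount only the diff layers added since the previous cache miss
        for index in range(mounted + 1, curr_layer + 1):
            mount_diff_layer(layers[index])
        mounted = curr_layer
        # ... inspect the mounted filesystem for this layer here ...
    # undo all mounts made: the base layer plus `mounted` diff layers
    unmount_all(mounted + 1)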