Example #1
def create_source_representation(logger, conf, abstract_task):
    """
    Create Source object.

    :param logger: Logger object.
    :param conf: Conf dict.
    :param abstract_task: Abstract task dict.
    :return: Source object.
    """
    # Initialize Clade client to make requests
    clade = Clade(conf['build base'])
    if not clade.work_dir_ok():
        raise RuntimeError('Build base is not OK')

    prefixes = _prefixes(conf, clade)

    # Ask for dependencies for each CC
    cfiles, dep_paths, files_map = _collect_file_dependencies(
        clade, abstract_task)

    # Read file with source analysis
    collection = Source(cfiles, prefixes, dep_paths)
    collection.c_full_paths = _c_full_paths(collection, cfiles)

    _import_code_analysis(logger, conf, clade, files_map, collection)
    if conf.get('dump types'):
        dump_types('type collection.json')
    if conf.get('dump source code analysis'):
        collection.dump('vars.json', 'functions.json', 'macros.json')
    return collection
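
All of these examples share the same initialization pattern: construct a Clade object for a build base directory and check it with work_dir_ok() before issuing any queries. The sketch below is a minimal illustration of that guard, assuming the usual "from clade import Clade" import; the open_build_base() helper and the path passed to it are hypothetical placeholders.

# Minimal sketch of the common guard used throughout these examples.
# The helper name and the build base path are hypothetical placeholders.
from clade import Clade


def open_build_base(path):
    # Construct a Clade client for the given build base directory.
    clade = Clade(path)
    # work_dir_ok() reports whether the Clade working directory exists and is not corrupted.
    if not clade.work_dir_ok(log=True):
        raise RuntimeError('Build base "{0}" is not OK'.format(path))
    return clade
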
Example #2
def main(args=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description=
        "Check that the Clade working directory exists and is not corrupted.")

    parser.add_argument(dest="work_dir",
                        help="path to the Clade working directory")

    args = parser.parse_args(args)

    c = Clade(args.work_dir)
    sys.exit(not c.work_dir_ok(log=True))
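
A short usage note, assuming the module above is run as a script: since the last line passes "not c.work_dir_ok(log=True)" to sys.exit(), a valid working directory yields exit status 0 and an invalid one yields exit status 1. The path below is a hypothetical placeholder.

# Hypothetical invocation of the checker above; the path is a placeholder.
# A valid Clade working directory results in exit status 0, an invalid one in exit status 1.
main(["./clade-work-dir"])
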
Example #3
    def weave(self):
        self.abstract_task_desc.setdefault('extra C files', dict())

        clade = Clade(self.conf['build base'])
        if not clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')
        meta = clade.get_meta()

        # This is required to get compiler (Aspectator) specific stdarg.h since kernel C files are compiled
        # with the "-nostdinc" option and the system stdarg.h cannot be used.
        aspectator_search_dir = '-isystem' + klever.core.utils.execute(
            self.logger, (klever.core.vtg.utils.get_cif_or_aspectator_exec(
                self.conf, 'aspectator'), '-print-file-name=include'),
            collect_all_stdout=True)[0]

        env = dict(os.environ)
        # Print stubs instead of inline Assembler since verifiers do not interpret it and can even fail.
        env['LDV_INLINE_ASM_STUB'] = ''

        for grp in self.abstract_task_desc['grps']:
            self.logger.info('Weave in C files of group "{0}"'.format(
                grp['id']))

            for extra_cc in grp['Extra CCs']:
                # Each CC is either a pair (compiler command identifier, compiler command type) or the name of a JSON
                # file with a compiler command description.
                if isinstance(extra_cc['CC'], list):
                    cc = clade.get_cmd(*extra_cc['CC'], with_opts=True)
                    if "in file" in extra_cc:
                        # This is for CC commands with several input files
                        infile = extra_cc["in file"]
                    else:
                        infile = cc["in"][0]

                    infile = clade.get_storage_path(infile)
                    if meta['conf'].get('Compiler.preprocess_cmds', False):
                        infile = infile.split('.c')[0] + '.i'
                else:
                    with open(os.path.join(self.conf['main working directory'],
                                           extra_cc['CC']),
                              encoding='utf8') as fp:
                        cc = json.load(fp)

                    infile = cc["in"][0]

                # Distinguish source files having the same names.
                outfile_unique = '{0}.c'.format(
                    klever.core.utils.unique_file_name(
                        os.path.splitext(os.path.basename(infile))[0], '.c'))
                # This is used for storing/getting to/from cache where uniqueness is guaranteed by other means.
                outfile = '{0}.c'.format(
                    os.path.splitext(os.path.basename(infile))[0])
                self.logger.info('Weave in C file "{0}"'.format(infile))

                # Produce aspect to be weaved in.
                if 'plugin aspects' in extra_cc:
                    self.logger.info(
                        'Concatenate all aspects of all plugins together')

                    # Resulting aspect.
                    aspect = 'aspect'

                    # Get all aspects. Place RSG aspects at the beginning since they can instrument entities added by
                    # aspects of other plugins while corresponding function declarations still need to be at the
                    # beginning of the file.
                    aspects = []
                    for plugin_aspects in extra_cc['plugin aspects']:
                        if plugin_aspects['plugin'] == 'RSG':
                            aspects[0:0] = plugin_aspects['aspects']
                        else:
                            aspects.extend(plugin_aspects['aspects'])

                    # Concatenate aspects.
                    with open(aspect, 'w',
                              encoding='utf8') as fout, fileinput.input(
                                  [
                                      os.path.join(
                                          self.conf['main working directory'],
                                          aspect) for aspect in aspects
                                  ],
                                  openhook=fileinput.hook_encoded(
                                      'utf8')) as fin:
                        for line in fin:
                            fout.write(line)
                else:
                    # Instrumentation is not required when there are no aspects. But we will still pass source files
                    # through the C-backend to make the resulting code look similar and thus avoid various issues
                    # when merging source files and models together.
                    aspect = None

                if aspect:
                    self.logger.info(
                        'Aspect to be weaved in is "{0}"'.format(aspect))
                else:
                    self.logger.info(
                        'C file will be passed through C Back-end only')

                cwd = clade.get_storage_path(cc['cwd'])

                is_model = (grp['id'] == 'models')

                # Original sources should be woven in and we do not need to get cross references for them since this
                # was already done before.
                if not is_model:
                    self.__weave(infile, cc['opts'], aspect, outfile_unique,
                                 clade, env, cwd, aspectator_search_dir,
                                 is_model)
                # For generated models we need to weave them in (actually, just pass them through the C Back-end) and
                # always get cross references since most likely they are all different.
                elif 'generated' in extra_cc:
                    self.__weave(infile, cc['opts'], aspect, outfile_unique,
                                 clade, env, cwd, aspectator_search_dir,
                                 is_model)
                    if self.conf[
                            'code coverage details'] != 'Original C source files':
                        self.__get_cross_refs(infile, cc['opts'],
                                              outfile_unique, clade, cwd,
                                              aspectator_search_dir)
                # For non-generated models use results cache in addition.
                else:
                    cache_dir = os.path.join(
                        self.conf['cache directory'],
                        klever.core.utils.get_file_name_checksum(infile))
                    with klever.core.utils.LockedOpen(cache_dir + '.tmp', 'w'):
                        if os.path.exists(cache_dir):
                            self.logger.info('Get woven in C file from cache')
                            self.abstract_task_desc['extra C files'].append({
                                'C file':
                                os.path.relpath(
                                    os.path.join(cache_dir,
                                                 os.path.basename(outfile)),
                                    self.conf['main working directory'])
                            })
                            if self.conf[
                                    'code coverage details'] != 'Original C source files':
                                self.logger.info(
                                    'Get cross references from cache')
                                self.__merge_additional_srcs(
                                    os.path.join(cache_dir,
                                                 'additional sources'))
                        else:
                            os.makedirs(cache_dir)
                            self.__weave(infile, cc['opts'], aspect,
                                         outfile_unique, clade, env, cwd,
                                         aspectator_search_dir, is_model)
                            self.logger.info('Store woven in C file to cache')
                            shutil.copy(outfile_unique,
                                        os.path.join(cache_dir, outfile))

                            if self.conf[
                                    'code coverage details'] != 'Original C source files':
                                self.__get_cross_refs(infile, cc['opts'],
                                                      outfile_unique, clade,
                                                      cwd,
                                                      aspectator_search_dir)
                                self.logger.info(
                                    'Store cross references to cache')
                                shutil.copytree(
                                    outfile_unique + ' additional sources',
                                    os.path.join(cache_dir,
                                                 'additional sources'))

        # For auxiliary files there are no cross references since it is rather hard to get them from Aspectator. But
        # there is still highlighting.
        if self.conf['code coverage details'] == 'All source files':
            for aux_file in glob.glob('*.aux'):
                new_file = os.path.join(
                    'additional sources', 'generated models',
                    os.path.relpath(aux_file,
                                    self.conf['main working directory']))

                os.makedirs(os.path.dirname(new_file), exist_ok=True)
                shutil.copy(aux_file, new_file)

                cross_refs = CrossRefs(self.conf, self.logger, clade, aux_file,
                                       new_file, self.search_dirs)
                cross_refs.get_cross_refs()

        self.abstract_task_desc['additional sources'] = os.path.relpath('additional sources',
                                                                        self.conf['main working directory']) \
            if os.path.isdir('additional sources') else None

        # Copy additional sources for total code coverage.
        if self.conf['code coverage details'] != 'Original C source files':
            with klever.core.utils.Cd('additional sources'):
                for root, dirs, files in os.walk(os.path.curdir):
                    for file in files:
                        # These files are handled below in addition to corresponding source files.
                        if file.endswith('.json'):
                            continue

                        if self.conf['code coverage details'] == 'C source files including models' \
                                and not file.endswith('.c'):
                            continue

                        file = os.path.join(root, file)
                        new_file = os.path.join(
                            self.conf['additional sources directory'], file)
                        os.makedirs(os.path.dirname(new_file), exist_ok=True)

                        with klever.core.utils.LockedOpen(
                                new_file + '.tmp', 'w'):
                            if os.path.isfile(new_file):
                                os.remove(new_file + '.tmp')
                                continue

                            shutil.copy(file, new_file)
                            shutil.copy(file + '.idx.json',
                                        new_file + '.idx.json')

                            os.remove(new_file + '.tmp')

        # These sections won't be referred to any more.
        del (self.abstract_task_desc['grps'])
        del (self.abstract_task_desc['deps'])
Example #4
    def __get_cross_refs(self, infile, opts, outfile, clade, cwd,
                         aspectator_search_dir):
        # Get cross references and everything required for them.
        # Limit parallel workers in Clade to 4 since at this stage there may be several parallel task generators and we
        # prefer their parallelism over the Clade default one.
        clade_extra = Clade(work_dir=os.path.realpath(outfile + ' clade'),
                            preset=self.conf['Clade']['preset'],
                            conf={'cpu_count': 4})
        # TODO: this can be incorporated into instrumentation above but it will need some Clade changes.
        # Emulate normal compilation (indeed just parsing thanks to "-fsyntax-only") to get additional
        # dependencies (model source files) and information on them.
        clade_extra.intercept([
            klever.core.vtg.utils.get_cif_or_aspectator_exec(
                self.conf, 'aspectator'),
            '-I' + os.path.join(
                os.path.dirname(self.conf['specifications base']), 'include')
        ] + klever.core.vtg.utils.prepare_cif_opts(opts, clade, True) +
                              [aspectator_search_dir, '-fsyntax-only', infile],
                              cwd=cwd)
        clade_extra.parse_list(["CrossRef"])

        if not clade_extra.work_dir_ok():
            raise RuntimeError('Build base is not OK')

        # Like in klever.core.job.Job#__upload_original_sources.
        os.makedirs(outfile + ' additional sources')
        for root, dirs, files in os.walk(clade_extra.storage_dir):
            for file in files:
                file = os.path.join(root, file)

                storage_file = klever.core.utils.make_relative_path(
                    [clade_extra.storage_dir], file)

                # Do not treat those source files that were already processed and uploaded as original sources.
                if os.path.commonpath([
                        os.path.join(os.path.sep, storage_file),
                        clade.storage_dir
                ]) == clade.storage_dir:
                    continue

                new_file = klever.core.utils.make_relative_path(
                    self.search_dirs, storage_file, absolutize=True)

                # These source files belong neither to original sources nor to models, e.g. these are compiler
                # headers.
                if os.path.isabs(new_file):
                    continue

                # We treat all remaining source files whose paths do not start with "specifications" as generated
                # models. This is not correct in all cases, e.g. when users put some files within $KLEVER_DATA_DIR.
                if not new_file.startswith('specifications'):
                    new_file = os.path.join('generated models', new_file)

                new_file = os.path.join(outfile + ' additional sources',
                                        new_file)
                os.makedirs(os.path.dirname(new_file), exist_ok=True)
                shutil.copy(file, new_file)

                cross_refs = CrossRefs(self.conf, self.logger, clade_extra,
                                       os.path.join(os.path.sep, storage_file),
                                       new_file, self.search_dirs)
                cross_refs.get_cross_refs()

        self.__merge_additional_srcs(outfile + ' additional sources')

        if not self.conf['keep intermediate files']:
            shutil.rmtree(outfile + ' clade')
Example #5
class Tracer:
    def __init__(self, build_base):
        self.clade = Clade(build_base)

        if not self.clade.work_dir_ok():
            raise RuntimeError("Specified Clade build base is not valid")

    def find_functions(self, func_names: List[str]):
        functions = []

        s_regex = re.compile("^" + ("$|^".join(func_names)) + "$")

        for func in self.clade.functions:
            if s_regex.match(func):
                for file in self.clade.functions[func]:
                    functions.append(Function(func, file))

        if not functions:
            raise RuntimeError(
                "Specified functions were not found in the Clade build base")
        elif len(functions) < len(func_names):
            for func_name in [
                    x for x in func_names
                    if x not in [y.name for y in functions]
            ]:
                raise RuntimeError(
                    "{!r} function was not found in the Clade build base".
                    format(func_name))

        return functions

    def find_functions_with_prefix(self, prefix):
        functions = []

        for func in self.clade.functions:
            if re.search(prefix, func, flags=re.I):
                for file in self.clade.functions[func]:
                    functions.append(Function(func, file))

        if not functions:
            raise RuntimeError(
                "Functions with prefix {!r} were not found in the Clade build base"
                .format(prefix))

        return functions

    def trace(self, from_func: Function, to_func: Function):
        return self.trace_list([from_func], [to_func])

    def trace_list(self, from_funcs: List[Function], to_funcs: List[Function]):
        trace = dict()

        queue = collections.deque()
        queue.extend(from_funcs)
        visited = set()

        while len(queue) > 0:
            func = queue.pop()

            visited.add(func)

            if not self.__calls_somebody(func):
                continue

            calls = self.clade.callgraph[func.path][func.name]["calls"]
            for called_file in calls:
                for called_func_name in calls[called_file]:
                    called_func = Function(called_func_name, called_file)

                    if func in trace:
                        trace[func].append(called_func)
                    else:
                        trace[func] = [called_func]

                    if called_func not in to_funcs and called_func not in visited:
                        queue.append(called_func)

        trace = self.__reverse_trace(trace)
        trace = self.__filter_trace(trace, to_funcs)
        return trace

    @staticmethod
    def __reverse_trace(trace):
        reversed_trace = dict()

        for func in trace:
            for called_func in trace[func]:
                if called_func in reversed_trace:
                    reversed_trace[called_func].append(func)
                else:
                    reversed_trace[called_func] = [func]

        return reversed_trace

    @staticmethod
    def __filter_trace(trace, to_funcs):
        filtered_trace = dict()

        queue = collections.deque()
        queue.extend(to_funcs)
        visited = set()

        while len(queue) > 0:
            called_func = queue.pop()

            visited.add(called_func)

            if called_func not in trace:
                continue

            for func in trace[called_func]:
                if func in filtered_trace:
                    filtered_trace[func].append(called_func)
                else:
                    filtered_trace[func] = [called_func]

                if func not in visited:
                    queue.append(func)

        return filtered_trace

    @staticmethod
    def print_dot(trace, filename):
        dot = graphviz.Digraph(graph_attr={'rankdir': 'LR'},
                               node_attr={'shape': 'rectangle'})

        nodes = set()

        for func in trace:
            for called_func in trace[func]:
                if func.name not in nodes:
                    nodes.add(func.name)
                    dot.node(func.name)

                if called_func.name not in nodes:
                    nodes.add(called_func.name)
                    dot.node(called_func.name)

                dot.edge(func.name, called_func.name)

        dot.render(filename)

    def __calls_somebody(self, func):
        if func.path not in self.clade.callgraph:
            return False

        if func.name not in self.clade.callgraph[func.path]:
            return False

        if "calls" not in self.clade.callgraph[func.path][func.name]:
            return False

        return True
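
A hedged usage sketch for the Tracer class above, relying only on the methods it defines; the build base path and the function names are hypothetical placeholders.

# Hypothetical usage of the Tracer above; the build base path and function
# names are placeholders.
tracer = Tracer("path/to/build/base")

from_funcs = tracer.find_functions(["main"])
to_funcs = tracer.find_functions_with_prefix("mutex_lock")

# trace_list() walks the callgraph from the source functions, reverses the
# collected edges and filters them so that only chains reaching the target
# functions remain.
trace = tracer.trace_list(from_funcs, to_funcs)

# Render the filtered call graph with Graphviz.
Tracer.print_dot(trace, "trace.gv")
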
Example #6
def test_check_work_dir_bad(tmpdir, cmds_file):
    c = Clade(tmpdir, cmds_file)

    assert not c.work_dir_ok(log=True)
Example #7
def test_check_work_dir(clade_api: Clade):
    assert clade_api.work_dir_ok(log=True)
Example #8
def test_check_work_dir_fail(tmpdir):
    c = Clade(tmpdir)

    assert not c.work_dir_ok()
Example #9
class Job(klever.core.components.Component):
    CORE_COMPONENTS = ['PFG', 'VTG', 'VRP']

    def __init__(self,
                 conf,
                 logger,
                 parent_id,
                 callbacks,
                 mqs,
                 vals,
                 id=None,
                 work_dir=None,
                 attrs=None,
                 separate_from_parent=True,
                 include_child_resources=False,
                 components_common_conf=None):
        super(Job,
              self).__init__(conf, logger, parent_id, callbacks, mqs, vals, id,
                             work_dir, attrs, separate_from_parent,
                             include_child_resources)
        self.common_components_conf = components_common_conf

        if work_dir:
            self.common_components_conf[
                'additional sources directory'] = os.path.join(
                    os.path.realpath(work_dir), 'additional sources')

        self.clade = None
        self.components = []
        self.component_processes = []

    def decide_job_or_sub_job(self):
        self.logger.info('Decide job/sub-job "{0}"'.format(self.id))

        # This is required to associate verification results with particular sub-jobs.
        # Skip the leading "/" since this identifier is used in os.path.join() that would otherwise return an absolute path.
        self.common_components_conf['sub-job identifier'] = self.id[1:]

        # Check and set build base here since many Core components need it.
        self.__set_build_base()
        self.clade = Clade(self.common_components_conf['build base'])
        if not self.clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')

        self.__retrieve_working_src_trees()
        self.__get_original_sources_basic_info()
        self.__upload_original_sources()

        # Create directory where files will be cached and remember absolute path to it for components.
        os.mkdir('cache')
        self.common_components_conf['cache directory'] = os.path.realpath(
            'cache')

        if self.common_components_conf['keep intermediate files']:
            self.logger.debug(
                'Create components configuration file "conf.json"')
            with open('conf.json', 'w', encoding='utf8') as fp:
                json.dump(self.common_components_conf,
                          fp,
                          ensure_ascii=False,
                          sort_keys=True,
                          indent=4)

        self.__get_job_or_sub_job_components()
        self.callbacks = klever.core.components.get_component_callbacks(
            self.logger, [type(self)] + self.components)
        self.launch_sub_job_components()

        self.clean_dir = True
        self.logger.info("All components finished")
        if self.conf.get('collect total code coverage', None):
            self.logger.debug('Waiting for a collecting coverage')
            while not self.vals['coverage_finished'].get(
                    self.common_components_conf['sub-job identifier'], True):
                time.sleep(1)
            self.logger.debug("Coverage collected")

    main = decide_job_or_sub_job

    def __set_build_base(self):
        if 'build base' not in self.common_components_conf:
            raise KeyError(
                "Provide 'build base' configuration option to start verification"
            )

        common_advice = 'please, fix "job.json" (attribute "build base")'
        common_advice += ' or/and deployment configuration file (attribute "Klever Build Bases")'

        # Try to find the specified build base either in the normal way or additionally in the directory "build bases"
        # that is convenient when working with many build bases.
        try:
            build_base = klever.core.utils.find_file_or_dir(
                self.logger, os.path.curdir,
                self.common_components_conf['build base'])
        except FileNotFoundError:
            try:
                build_base = klever.core.utils.find_file_or_dir(
                    self.logger, os.path.curdir,
                    os.path.join('build bases',
                                 self.common_components_conf['build base']))
            except FileNotFoundError:
                raise FileNotFoundError(
                    'Specified build base "{0}" does not exist, {1}'.format(
                        self.common_components_conf['build base'],
                        common_advice)) from None

        # Extract build base from archive. There should not be any intermediate directories in archives.
        if os.path.isfile(build_base) and (tarfile.is_tarfile(build_base)
                                           or zipfile.is_zipfile(build_base)):
            if tarfile.is_tarfile(build_base):
                self.logger.debug(
                    'Build base "{0}" is provided in form of TAR archive'.
                    format(build_base))
                with tarfile.open(build_base) as TarFile:
                    TarFile.extractall('build base')
            else:
                self.logger.debug(
                    'Build base "{0}" is provided in form of ZIP archive'.
                    format(build_base))
                with zipfile.ZipFile(build_base) as zfp:
                    zfp.extractall('build base')

            # Directory contains extracted build base.
            extracted_from = ' extracted from "{0}"'.format(
                os.path.realpath(build_base))
            build_base = 'build base'
        else:
            extracted_from = ''

        # We need to specify absolute path to build base since it will be used in different Klever components. Besides,
        # this simplifies troubleshooting.
        build_base = os.path.realpath(build_base)

        # TODO: fix after https://github.com/17451k/clade/issues/108.
        if not os.path.isdir(build_base):
            raise FileExistsError(
                'Build base "{0}"{1} is not a directory, {2}'.format(
                    build_base, extracted_from, common_advice))

        if not os.path.isfile(os.path.join(build_base, 'meta.json')):
            raise FileExistsError(
                'Directory "{0}"{1} is not a build base since it does not contain file "meta.json", {2}'
                .format(build_base, extracted_from, common_advice))

        self.common_components_conf['build base'] = build_base

        self.logger.debug('Klever components will use build base "{0}"'.format(
            self.common_components_conf['build base']))

    # Klever will try to cut off either working source trees (if specified) or at least the build directory (otherwise)
    # from referred file names. Sometimes this is rather optional, like for source files referred to by error traces,
    # but, say, for program fragment identifiers this is strictly necessary, since otherwise expert assessment will
    # not work as expected.
    def __retrieve_working_src_trees(self):
        clade_meta = self.clade.get_meta()
        self.common_components_conf['working source trees'] = clade_meta['working source trees'] \
            if 'working source trees' in clade_meta else [clade_meta['build_dir']]

    def __refer_original_sources(self, src_id):
        klever.core.utils.report(self.logger, 'patch', {
            'identifier': self.id,
            'original_sources': src_id
        }, self.mqs['report files'], self.vals['report id'],
                                 self.conf['main working directory'])

    def __process_source_files(self):
        for file_name in self.clade.src_info:
            self.mqs['file names'].put(file_name)

        for i in range(self.workers_num):
            self.mqs['file names'].put(None)

    def __process_source_file(self):
        while True:
            file_name = self.mqs['file names'].get()

            if not file_name:
                return

            src_file_name = klever.core.utils.make_relative_path(
                self.common_components_conf['working source trees'], file_name)

            if src_file_name != file_name:
                src_file_name = os.path.join('source files', src_file_name)

            new_file_name = os.path.join('original sources',
                                         src_file_name.lstrip(os.path.sep))
            os.makedirs(os.path.dirname(new_file_name), exist_ok=True)
            shutil.copy(self.clade.get_storage_path(file_name), new_file_name)

            cross_refs = CrossRefs(
                self.common_components_conf, self.logger, self.clade,
                file_name, new_file_name,
                self.common_components_conf['working source trees'],
                'source files')
            cross_refs.get_cross_refs()

    def __get_original_sources_basic_info(self):
        self.logger.info(
            'Get information on original sources for following visualization of uncovered source files'
        )

        # For each source file we need to know the total number of lines and places where functions are defined.
        src_files_info = dict()
        for file_name, file_size in self.clade.src_info.items():
            src_file_name = klever.core.utils.make_relative_path(
                self.common_components_conf['working source trees'], file_name)

            # Skip non-source files.
            if src_file_name == file_name:
                continue

            src_file_name = os.path.join('source files', src_file_name)

            src_files_info[src_file_name] = list()

            # Store source file size.
            src_files_info[src_file_name].append(file_size['loc'])

            # Store source file function definition lines.
            func_def_lines = list()
            funcs = self.clade.get_functions_by_file([file_name], False)

            if funcs:
                for func_name, func_info in list(funcs.values())[0].items():
                    func_def_lines.append(int(func_info['line']))

            src_files_info[src_file_name].append(sorted(func_def_lines))

        # Dump obtained information (huge data!) to load it when reporting total code coverage if everything goes okay.
        with open('original sources basic information.json', 'w') as fp:
            klever.core.utils.json_dump(src_files_info, fp,
                                        self.conf['keep intermediate files'])

    def __upload_original_sources(self):
        # Use the Clade UUID to distinguish various original sources. This works pretty well since this UUID is generated by uuid.uuid4().
        src_id = self.clade.get_uuid()

        session = klever.core.session.Session(self.logger,
                                              self.conf['Klever Bridge'],
                                              self.conf['identifier'])

        if session.check_original_sources(src_id):
            self.logger.info('Original sources were uploaded already')
            self.__refer_original_sources(src_id)
            return

        self.logger.info(
            'Cut off working source trees or build directory from original source file names and convert index data'
        )
        os.makedirs('original sources')
        self.mqs['file names'] = multiprocessing.Queue()
        self.workers_num = klever.core.utils.get_parallel_threads_num(
            self.logger, self.conf)
        subcomponents = [('PSFS', self.__process_source_files)]
        for i in range(self.workers_num):
            subcomponents.append(('RSF', self.__process_source_file))
        self.launch_subcomponents(False, *subcomponents)
        self.mqs['file names'].close()

        self.logger.info('Compress original sources')
        klever.core.utils.ArchiveFiles(['original sources'
                                        ]).make_archive('original sources.zip')

        self.logger.info('Upload original sources')
        try:
            session.upload_original_sources(src_id, 'original sources.zip')
        # Do not fail if there are already original sources. There may be complex data races because checking and
        # uploading the original sources archive are not atomic.
        except klever.core.session.BridgeError:
            if "original sources with this identifier already exists." not in list(
                    session.error.values())[0]:
                raise

        self.__refer_original_sources(src_id)

        if not self.conf['keep intermediate files']:
            shutil.rmtree('original sources')
            os.remove('original sources.zip')

    def __get_job_or_sub_job_components(self):
        self.logger.info('Get components for sub-job "{0}"'.format(self.id))

        self.components = [
            getattr(
                importlib.import_module('.{0}'.format(component.lower()),
                                        'klever.core'), component)
            for component in self.CORE_COMPONENTS
        ]

        self.logger.debug('Components to be launched: "{0}"'.format(', '.join(
            [component.__name__ for component in self.components])))

    def launch_sub_job_components(self):
        """Has callbacks"""
        self.logger.info('Launch components for sub-job "{0}"'.format(self.id))

        for component in self.components:
            p = component(self.common_components_conf,
                          self.logger,
                          self.id,
                          self.callbacks,
                          self.mqs,
                          self.vals,
                          separate_from_parent=True)
            self.component_processes.append(p)

        klever.core.components.launch_workers(self.logger,
                                              self.component_processes)
Example #10
    def add_models(self, generated_models):
        self.logger.info(
            'Add models to abstract verification task description')

        models = []
        if 'environment model' in self.abstract_task_desc:
            models.append({
                'model':
                os.path.relpath(
                    os.path.join(self.conf['main working directory'],
                                 self.abstract_task_desc['environment model']),
                    os.path.curdir),
                'options': {},
                'generated':
                True
            })

        if 'extra C files' in self.abstract_task_desc:
            self.abstract_task_desc['extra C files'] = []
            for c_file in (extra_c_file["C file"] for extra_c_file in
                           self.abstract_task_desc['extra C files']
                           if "C file" in extra_c_file):
                models.append(
                    os.path.relpath(
                        os.path.join(self.conf['main working directory'],
                                     c_file), os.path.curdir))

        def get_model_c_file(model):
            # Model may be a C file or a dictionary with model file and option attributes.
            if isinstance(model, dict):
                return model['model']
            else:
                return model

        # Get common and requirement specific models.
        if 'exclude common models' in self.conf:
            self.logger.info('Common models to be excluded:\n{0}'.format(
                '\n'.join([
                    '  {0}'.format(m)
                    for m in self.conf['exclude common models']
                ])))
            common_models = [
                m for m in self.conf['common models']
                if m not in self.conf['exclude common models']
            ]
        else:
            common_models = self.conf['common models']

        if common_models and 'models' in self.conf:
            for common_model in common_models:
                common_model_c_file = get_model_c_file(common_model)
                for model in self.conf['models']:
                    if common_model_c_file == get_model_c_file(model):
                        raise KeyError(
                            'C file "{0}" is specified in both common and requirement specific models'
                            .format(common_model_c_file))

        def add_model(model, model_c_file_realpath):
            if isinstance(model, dict):
                models.append({
                    'model': model_c_file_realpath,
                    'options': model['options']
                })
            else:
                models.append(model_c_file_realpath)

        if 'models' in self.conf:
            # Find out actual C files.
            for model in self.conf['models']:
                model_c_file = get_model_c_file(model)

                # Handle generated models whose C files start with "$".
                if model_c_file.startswith('$'):
                    is_generated_model_c_file_found = False
                    for generated_model_c_file in generated_models:
                        if generated_model_c_file.endswith(model_c_file[1:]):
                            if isinstance(model, dict):
                                # Specify model options for generated models that can not have model options themselves.
                                models.append({
                                    'model': generated_model_c_file,
                                    'options': model['options'],
                                    'generated': True
                                })
                            else:
                                models.append({
                                    'model': generated_model_c_file,
                                    'options': {},
                                    'generated': True
                                })
                            is_generated_model_c_file_found = True

                    if not is_generated_model_c_file_found:
                        raise KeyError(
                            'Model C file "{0}" was not generated'.format(
                                model_c_file[1:]))
                # Handle non-generated models.
                else:
                    model_c_file_realpath = klever.core.vtg.utils.find_file_or_dir(
                        self.logger, self.conf['main working directory'],
                        model_c_file)
                    self.logger.debug('Get model with C file "{0}"'.format(
                        model_c_file_realpath))
                    add_model(model, model_c_file_realpath)

        # Like for models above.
        for common_model in common_models:
            common_model_c_file = get_model_c_file(common_model)
            common_model_c_file_realpath = klever.core.vtg.utils.find_file_or_dir(
                self.logger, self.conf['main working directory'],
                common_model_c_file)
            self.logger.debug('Get common model with C file "{0}"'.format(
                common_model_c_file_realpath))
            add_model(common_model, common_model_c_file_realpath)

        self.logger.debug('Resulting models are: {0}'.format(models))

        if not models:
            self.logger.warning('No models are specified')
            return

        # CC extra full description files will be put to this directory as well as corresponding intermediate and final
        # output files.
        os.makedirs('models'.encode('utf-8'))

        self.logger.info(
            'Add aspects to abstract verification task description')
        aspects = []
        for model in models:
            aspect = '{}.aspect'.format(
                os.path.splitext(get_model_c_file(model))[0])

            # Aspects are not mandatory. There may be pure C models, e.g. when one needs to provide some definitions
            # without any weaving.
            if not os.path.isfile(aspect):
                continue

            if not os.stat(aspect).st_size:
                raise ValueError(
                    'Aspect "{0}" is empty and should be removed from the verification job'
                    .format(aspect))

            self.logger.debug('Get aspect "{0}"'.format(aspect))

            aspects.append(aspect)

        # Sort aspects to apply them in the deterministic order.
        aspects.sort()

        # Always specify either a model specific sets model or the common one.
        opts = [
            '-DLDV_SETS_MODEL_' +
            (model['options']['sets model']
             if isinstance(model, dict) and 'sets model' in model['options']
             else self.conf['common sets model']).upper()
        ]
        if self.conf.get('memory safety'):
            opts += ['-DLDV_MEMORY_SAFETY']
        if 'specifications set' in self.conf:
            opts += [
                '-DLDV_SPECS_SET_{0}'.format(
                    self.conf['specifications set'].replace('.', '_'))
            ]

        for grp in self.abstract_task_desc['grps']:
            self.logger.info('Add aspects to C files of group "{0}"'.format(
                grp['id']))
            for extra_cc in grp['Extra CCs']:
                if 'plugin aspects' not in extra_cc:
                    extra_cc['plugin aspects'] = []
                extra_cc['plugin aspects'].append({
                    'plugin':
                    self.name,
                    'aspects': [
                        os.path.relpath(aspect,
                                        self.conf['main working directory'])
                        for aspect in aspects
                    ]
                })
                extra_cc['opts'] = opts

        # Generate CC full description file per each model and add it to abstract task description.
        # First of all obtain CC options to be used to compile models.
        clade = Clade(self.conf['build base'])
        if not clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')
        meta = clade.get_meta()

        if not meta['conf'].get('Compiler.preprocess_cmds', False):
            # The model compiler input file is the input file whose compiler options and CWD should be used for
            # compiling models. This input file is relative to one of the source paths.
            compiler_cmds = None
            for path in self.conf['working source trees']:
                try:
                    compiler_cmds = list(
                        clade.get_compilation_cmds_by_file(
                            os.path.normpath(
                                os.path.join(
                                    path,
                                    self.conf['model compiler input file']))))
                    break
                except KeyError:
                    pass

            if not compiler_cmds:
                raise RuntimeError(
                    "There are no compiler commands for {!r}".format(
                        self.conf['model compiler input file']))
            elif len(compiler_cmds) > 1:
                self.logger.warning(
                    "There is more than one compiler command for {!r}".format(
                        self.conf['model compiler input file']))

            model_compiler_opts = clade.get_cmd_opts(compiler_cmds[0]['id'])
            model_compiler_cwd = compiler_cmds[0]['cwd']
        else:
            # No specific compiler options are necessary for models.
            model_compiler_opts = []
            if len(self.conf['working source trees']) != 1:
                raise NotImplementedError(
                    'There are several working source trees!')
            model_compiler_cwd = self.conf['working source trees'][0]

        model_grp = {'id': 'models', 'Extra CCs': []}
        for model in sorted(models, key=get_model_c_file):
            model_c_file = get_model_c_file(model)
            file, ext = os.path.splitext(
                os.path.join('models', os.path.basename(model_c_file)))
            base_name = klever.core.utils.unique_file_name(
                file, '{0}.json'.format(ext))
            full_desc_file = '{0}{1}.json'.format(base_name, ext)
            out_file = '{0}.c'.format(base_name)

            self.logger.debug('Dump CC full description to file "{0}"'.format(
                full_desc_file))
            with open(full_desc_file, 'w', encoding='utf-8') as fp:
                klever.core.utils.json_dump(
                    {
                        'cwd': model_compiler_cwd,
                        'in': [os.path.realpath(model_c_file)],
                        'out': [os.path.realpath(out_file)],
                        'opts': model_compiler_opts + opts
                    }, fp, self.conf['keep intermediate files'])

            extra_cc = {
                'CC':
                os.path.relpath(full_desc_file,
                                self.conf['main working directory'])
            }

            if 'generated' in model:
                extra_cc['generated'] = True

            if isinstance(model, dict):
                if model['options'].get('weave in model aspect'):
                    aspect = '{}.aspect'.format(
                        os.path.splitext(get_model_c_file(model))[0])

                    if not os.path.isfile(aspect):
                        raise FileNotFoundError(
                            'Aspect "{0}" to be weaved in model does not exist'
                            .format(aspect))

                    extra_cc['plugin aspects'] = [{
                        'plugin':
                        self.name,
                        'aspects': [
                            os.path.relpath(
                                aspect, self.conf['main working directory'])
                        ]
                    }]
                elif model['options'].get('weave in all aspects'):
                    extra_cc['plugin aspects'] = [{
                        'plugin':
                        self.name,
                        'aspects': [
                            os.path.relpath(
                                aspect, self.conf['main working directory'])
                            for aspect in aspects
                        ]
                    }]

            model_grp['Extra CCs'].append(extra_cc)

        self.abstract_task_desc['grps'].append(model_grp)
        for dep in self.abstract_task_desc['deps'].values():
            dep.append(model_grp['id'])
Example #11
    def request_arg_signs(self):
        self.logger.info('Request argument signatures')

        clade = Clade(work_dir=self.conf['build base'])
        if not clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')
        meta = clade.get_meta()

        for request_aspect in self.conf['request aspects']:
            request_aspect = klever.core.vtg.utils.find_file_or_dir(
                self.logger, self.conf['main working directory'],
                request_aspect)
            self.logger.debug('Request aspect is "{0}"'.format(request_aspect))

            # This is required to get compiler (Aspectator) specific stdarg.h since kernel C files are compiled with
            # the "-nostdinc" option and the system stdarg.h cannot be used.
            aspectator_search_dir = '-isystem' + klever.core.utils.execute(
                self.logger, ('aspectator', '-print-file-name=include'),
                collect_all_stdout=True)[0]

            for grp in self.abstract_task_desc['grps']:
                self.logger.info(
                    'Request argument signatures for C files of group "{0}"'.
                    format(grp['id']))

                for extra_cc in grp['Extra CCs']:
                    infile = extra_cc['in file']
                    self.logger.info(
                        'Request argument signatures for C file "{0}"'.format(
                            infile))

                    cc = clade.get_cmd(*extra_cc['CC'], with_opts=True)

                    env = dict(os.environ)
                    env['LDV_ARG_SIGNS_FILE'] = os.path.realpath(
                        os.path.splitext(
                            os.path.splitext(
                                os.path.basename(request_aspect))[0])[0])
                    self.logger.debug(
                        'Argument signature file is "{0}"'.format(
                            os.path.relpath(env['LDV_ARG_SIGNS_FILE'])))

                    # Add plugin aspects produced thus far (by EMG) since they can include additional headers for which
                    # additional argument signatures should be extracted. Like in Weaver.
                    if 'plugin aspects' in extra_cc:
                        self.logger.info(
                            'Concatenate all aspects of all plugins together')

                        # Resulting request aspect.
                        aspect = '{0}.aspect'.format(
                            klever.core.utils.unique_file_name(
                                os.path.splitext(os.path.basename(infile))[0],
                                '.aspect'))

                        # Get all aspects. Place the original request aspect at the beginning since it can instrument
                        # entities added by aspects of other plugins while corresponding function declarations still
                        # need to be at the beginning of the file.
                        aspects = [
                            os.path.relpath(
                                request_aspect,
                                self.conf['main working directory'])
                        ]
                        for plugin_aspects in extra_cc['plugin aspects']:
                            aspects.extend(plugin_aspects['aspects'])

                        # Concatenate aspects.
                        with open(
                                aspect, 'w', encoding='utf8'
                        ) as fout, fileinput.input(
                            [
                                os.path.join(
                                    self.conf['main working directory'],
                                    aspect) for aspect in aspects
                            ],
                                openhook=fileinput.hook_encoded(
                                    'utf8')) as fin:
                            for line in fin:
                                fout.write(line)
                    else:
                        aspect = request_aspect

                    storage_path = clade.get_storage_path(infile)
                    if meta['conf'].get('Compiler.preprocess_cmds', False) and \
                            'klever-core-work-dir' not in storage_path:
                        storage_path = storage_path.split('.c')[0] + '.i'

                    klever.core.utils.execute(
                        self.logger,
                        tuple([
                            'cif', '--in', storage_path, '--aspect',
                            os.path.realpath(aspect), '--stage',
                            'instrumentation', '--out',
                            os.path.realpath('{0}.c'.format(
                                klever.core.utils.unique_file_name(
                                    os.path.splitext(os.path.basename(infile))
                                    [0], '.c.aux'))), '--debug', 'DEBUG'
                        ] + (['--keep'] if self.
                             conf['keep intermediate files'] else []) +
                              ['--'] + klever.core.vtg.utils.prepare_cif_opts(
                                  cc['opts'], clade) +
                              [
                                  # Like in Weaver.
                                  '-I' + os.path.join(
                                      os.path.dirname(self.conf[
                                          'specifications base']), 'include'),
                                  aspectator_search_dir
                              ]),
                        env,
                        cwd=clade.get_storage_path(cc['cwd']),
                        timeout=0.01,
                        filter_func=klever.core.vtg.utils.CIFErrorFilter())
Example #12
class FragmentationAlgorythm:
    """
    This is a generic class to implement fragmentation strategies for particular programs. This is not a fully abstract
    class and sometimes can be directly used for verification without adaptation to program specifics.
    """
    CLADE_PRESET = 'base'

    def __init__(self, logger, conf, tactic, pf_dir):
        """
        The strategy needs a logger and configuration like the rest of the Klever components, but it also requires a
        Clade interface object (not yet initialized) and the description of the fragmentation set.

        :param logger: logging Logger object.
        :param conf: Configuration dictionary.
        :param tactic: Dictionary with fragmentation tactic options.
        :param pf_dir: Directory for storing program fragment descriptions.
        """
        # Simple attributes
        self.logger = logger
        self.conf = conf
        self.tactic = tactic
        self.pf_dir = pf_dir
        self.files_to_keep = list()
        self.project_attrs = list()

        self.source_paths = self.conf['working source trees']

        # Import clade
        self.clade = Clade(work_dir=self.conf['build base'],
                           preset=self.CLADE_PRESET)
        if not self.clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')

        self.__get_project_attrs()

    def fragmentation(self, fragmentation_set, tactic_name, fset_name):
        """
        It is the main function for a fragmentation strategy. The workflow is the following: it determines logical
        components of the program called units, then chooses files and units that should be verified according to the
        configuration provided by the user, gets the fragmentation set and reconstructs fragments if necessary
        according to this manually provided description, then adds dependencies if necessary to each fragment that
        should be verified and generates the description of each program fragment. In addition to the file names, the
        description contains compilation commands to get their options and dependencies between files.

        :parameter fragmentation_set: Fragmentation set description dict.
        :parameter tactic_name: Fragmentation tactic name.
        :parameter fset_name: Fragmentation set name.
        """
        # Extract dependencies
        self.logger.info("Start program fragmentation")
        if self.tactic.get('ignore dependencies'):
            self.logger.info(
                "Use memory efficient mode with limited dependencies extraction"
            )
            memory_efficient_mode = True
        else:
            self.logger.info(
                "Extract full dependencies between files and functions")
            memory_efficient_mode = False
        deps = Program(self.logger,
                       self.clade,
                       self.source_paths,
                       memory_efficient_mode=memory_efficient_mode)

        # Decompose using units
        self.logger.info("Determine units in the target program")
        self._determine_units(deps)

        # Prepare semifinal fragments according to strategy chosen manually
        self.logger.info(
            "Apply corrections of program fragments provided by a user")
        defined_groups = self._do_manual_correction(deps, fragmentation_set)

        # Mark dirs, units, files, functions
        self.logger.info("Select program fragments for verification")
        self._determine_targets(deps)

        # Prepare final optional addition of fragments if necessary
        self.logger.info(
            "Collect dependencies if necessary for each fragment intended for verification"
        )
        grps = self._add_dependencies(deps)

        # Remove useless duplicates
        for manual in defined_groups:
            fragment = deps.get_fragment(manual)
            if fragment:
                allfiles = set()
                for item in defined_groups[manual]:
                    allfiles.update(item.files)
                fragment.files.difference_update(allfiles)

        # Before describing files add manually defined files
        for group in grps:
            update = True
            while update:
                update = False
                old = set(grps[group][1])
                for fragment in list(grps[group][1]):
                    if not fragment.files:
                        grps[group][1].remove(fragment)
                    grps[group][1].update(
                        defined_groups.get(str(fragment), set()))
                if old.symmetric_difference(grps[group][1]):
                    update = True

        # Prepare program fragments
        self.logger.info("Generate program fragments")
        fragments_files = self.__generate_program_fragments_descriptions(
            deps, grps)

        # Prepare data attributes
        self.logger.info("Prepare data attributes for generated fragments")
        attr_data = self.__prepare_data_files(grps, tactic_name, fset_name)

        # Print fragments
        if self.tactic.get('print fragments'):
            self.__print_fragments(deps)
            for fragment in deps.fragments:
                self.__draw_fragment(fragment)

        return attr_data, fragments_files

    def _determine_units(self, program):
        """
        Implement this function to extract logical components of the particular program. For programs for which nobody
        created a specific strategy, there are no units at all.

        :param program: Program object.
        """
        pass

    def _determine_targets(self, program):
        """
        Determine the program fragments that should be verified. We refer to these fragments as target fragments.

        :param program: Program object.
        """
        add = set(self.conf.get('targets'))
        if not add:
            raise RuntimeError(
                "Set configuration property 'targets' to specify which functions, files or fragments"
                " you want to verify")
        exclude = set(self.conf.get('exclude targets', set()))

        # Search for files that are already added to several units and mark them as not unique
        self.logger.info(
            'Mark unique files that belong to no more than one fragment')
        summary = set()
        for fragment in program.fragments:
            for file in fragment.files:
                if file not in summary:
                    summary.add(file)
                else:
                    file.unique = False

        files = set()
        self.logger.info(
            "Find files matched by the expressions given by the user ('add' configuration properties)"
        )
        new_files, matched = program.get_files_for_expressions(add)
        files.update(new_files)
        add.difference_update(matched)
        if len(add) > 0:
            raise ValueError(
                'Cannot find fragments, files or functions for the following expressions: {}'
                .format(', '.join(add)))
        self.logger.info(
            "Find files matched by the expressions given by the user ('exclude' configuration properties)"
        )
        new_files, matched = program.get_files_for_expressions(exclude)
        files.difference_update(new_files)

        for file in files:
            self.logger.debug('Mark file {!r} as a target'.format(file.name))
            file.target = True

    def _do_manual_correction(self, program, fragments_desc):
        """
        According to the fragmentation set configuration we need to change the content of logically extracted units or
        create new ones.

        :param program: Program object.
        :param fragments_desc: Fragmentation set dictionary.
        """
        self.logger.info(
            "Adjust fragments according to the manually provided fragmentation set"
        )
        fragments = fragments_desc.get('fragments', dict())
        remove = set(fragments_desc.get('exclude from all fragments', set()))
        add = set(fragments_desc.get('add to all fragments', set()))
        defined_groups = dict()

        # Collect files
        new = dict()
        for identifier, frags_exprs in ((i, set(e))
                                        for i, e in fragments.items()):
            # First detect fragments and use them in the description of manually defined groups
            frags, matched = program.get_fragments(frags_exprs)
            self.logger.debug(
                "Matched as fragments the following expressions for {!r}: {}".
                format(identifier, ', '.join(matched)))
            self_fragment = program.get_fragment(identifier)
            if self_fragment and self_fragment in frags and len(frags) == 1:
                pass
            elif self_fragment and self_fragment in frags:
                frags.remove(self_fragment)
                matched.remove(identifier)
                frags_exprs.difference_update(matched)
                defined_groups[identifier] = frags
            else:
                defined_groups[identifier] = frags

            files, matched = program.get_files_for_expressions(frags_exprs)
            frags_exprs.difference_update(matched)
            if len(frags_exprs) > 0:
                self.logger.warning(
                    'Cannot find fragments, files or functions for the following expressions: {}'
                    .format(', '.join(frags_exprs)))

            new[identifier] = files

        # Find relevant fragments
        all_files = set()
        for files in new.values():
            all_files.update(files)
        relevant_fragments = program.get_fragments_with_files(all_files)

        # Add all
        frags, matched = program.get_fragments(add)
        add.difference_update(matched)
        if frags:
            all_frgs = set(program.fragments).difference(frags)
            for fragment in all_frgs:
                defined_groups.setdefault(str(fragment), set())
                defined_groups[str(fragment)].update(frags)
        addiction, _ = program.get_files_for_expressions(add)

        # Remove all
        # First detect fragments and use them in the description of manually defined groups
        frags, matched = program.get_fragments(remove)
        remove.difference_update(matched)
        if matched:
            for manual in defined_groups:
                defined_groups[manual].difference_update(frags)
            for frag in (str(f) for f in frags if str(f) in defined_groups):
                del defined_groups[frag]
        for fragment in frags:
            program.remove_fragment(fragment)

        removal, _ = program.get_files_for_expressions(remove)

        # Remove them
        for fragment in relevant_fragments:
            program.remove_fragment(fragment)

        # Create new fragments
        for name, files in new.items():
            program.create_fragment(name, files, add=True)

        # Do modification
        empty = set()
        for fragment in program.fragments:
            fragment.files.update(addiction)
            fragment.files.difference_update(removal)
            if not fragment.files:
                empty.add(fragment)

        # Remove empty
        for fragment in empty:
            program.remove_fragment(fragment)

        return defined_groups

    def _add_dependencies(self, program):
        """
        After we determined target fragments we may want to add dependent fragments. This should be implemented mostly
        by strategies variants for particular programs.

        :param program: Program object.
        :return: Dictionary with sets of fragments.
        """
        aggregator = Abstract(self.logger, self.conf, self.tactic, program)
        return aggregator.get_groups()

    def __prepare_data_files(self, grps, tactic, fragmentation_set):
        """
        Prepare data files that describe the content of program fragments.

        :param grps: Dictionary with program fragments with dependencies.
        :param tactic: Name of the tactic.
        :param fragmentation_set: Name of the fragmentation set.
        :return: Attributes and the name of the data file.
        """
        data = dict()
        for name, main_and_frgs in grps.items():
            main, frags = main_and_frgs
            data[name] = {
                "files": [
                    make_relative_path(self.source_paths, l.name)
                    for f in frags for l in f.files
                ],
                "size":
                str(sum(int(f.size) for f in frags))
            }

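        # A hypothetical example of the resulting file content (fragment names, file names and sizes are made up
        # for illustration):
        # {
        #     "fragment1": {"files": ["dir/a.c", "dir/b.c"], "size": "345"},
        #     "fragment2": {"files": ["dir/c.c"], "size": "123"}
        # }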
        with open('agregations description.json', 'w', encoding='utf8') as fp:
            ujson.dump(data,
                       fp,
                       sort_keys=True,
                       indent=4,
                       ensure_ascii=False,
                       escape_forward_slashes=False)

        return [{
            'name':
            'Program fragmentation',
            'value': [{
                'name': 'tactic',
                'value': tactic
            }, {
                'name': 'set',
                'value': fragmentation_set
            }]
        }], 'agregations description.json'

    def __get_project_attrs(self):
        """
        Extract attributes that describe the program from the build base storage.
        """
        clade_meta = self.clade.get_meta()

        if 'project attrs' in clade_meta:
            self.project_attrs = clade_meta['project attrs']
        else:
            self.logger.warning("There is no project attributes in build base")

    def __generate_program_fragments_descriptions(self, program, grps):
        """
        Generate json files with descriptions of each program fragment that should be verified.

        :param program: Program object.
        :param grps: Dictionary with program fragments with dependencies.
        :return: A list of file names.
        """
        files = list()
        for name, grp in grps.items():
            files.append(self.__describe_program_fragment(program, name, grp))
        return files

    def __describe_program_fragment(self, program, name, grp):
        """
        Create the JSON file for the given program fragment with dependencies.

        :param program: Program object.
        :param name: Name of the fragment.
        :param grp: Set of fragments with dependencies.
        :return: The name of the created file.
        """
        # Determine fragment name
        main_fragment, fragments = grp
        self.logger.info('Generate fragment description {!r}'.format(name))

        pf_desc = {
            'id': name,
            'fragment': name,
            'targets': sorted([str(f) for f in main_fragment.target_files]),
            'grps': list(),
            'deps': dict(),
            'size': str(sum((int(f.size) for f in fragments)))
        }

        for frag in fragments:
            fragment_description = {
                'id':
                frag.name,
                'Extra CCs': [{
                    "CC": [file.cmd_id, file.cmd_type],
                    "in file": str(file)
                } for file in frag.files],
                'files':
                sorted(
                    make_relative_path(self.source_paths, str(f))
                    for f in frag.files),
                'abs files':
                sorted(str(f) for f in frag.files)
            }
            pf_desc['grps'].append(fragment_description)
            pf_desc['deps'][frag.name] = [
                succ.name for succ in program.get_fragment_successors(frag)
                if succ in fragments
            ]
        self.logger.debug('Program fragment dependencies are {}'.format(
            pf_desc['deps']))

        pf_desc_file = os.path.join(self.pf_dir, pf_desc['fragment'] + '.json')
        if os.path.isfile(pf_desc_file):
            raise FileExistsError(
                'Program fragment description file {!r} already exists'.format(
                    pf_desc_file))
        self.logger.debug(
            'Dump program fragment description {!r} to file {!r}'.format(
                pf_desc['fragment'], pf_desc_file))
        dir_path = os.path.dirname(pf_desc_file).encode('utf8')
        if dir_path:
            os.makedirs(dir_path, exist_ok=True)

        with open(pf_desc_file, 'w', encoding='utf8') as fp:
            ujson.dump(pf_desc,
                       fp,
                       sort_keys=True,
                       indent=4,
                       ensure_ascii=False,
                       escape_forward_slashes=False)
        return pf_desc_file

    def __print_fragments(self, program):
        """
        Print a graph to illustrate dependencies between all program fragments. For large projects such a graph can be
        huge, so by default this should be disabled.

        :param program: Program object.
        """
        self.logger.info('Print fragments to working directory {!r}'.format(
            str(os.path.abspath(os.path.curdir))))
        g = Digraph(graph_attr={'rankdir': 'LR'},
                    node_attr={'shape': 'rectangle'})
        for fragment in program.fragments:
            g.node(
                fragment.name, "{}".format(fragment.name) +
                (' (target)' if fragment.target else ''))

        for fragment in program.fragments:
            for suc in program.get_fragment_successors(fragment):
                g.edge(fragment.name, suc.name)
        g.render('program fragments')

    def __draw_fragment(self, fragment):
        """
        Print a graph with files and dependencies between them for a fragment.

        :param fragment: Fragment object.
        """
        g = Digraph(graph_attr={'rankdir': 'LR'},
                    node_attr={'shape': 'rectangle'})
        for file in fragment.files:
            g.node(
                file.name,
                make_relative_path(self.source_paths, file.name) +
                (' (target)' if fragment.target else ''))

        for file in fragment.files:
            for suc in file.successors:
                if suc in fragment.files:
                    g.edge(file.name, suc.name)
        if not os.path.exists('fragments'):
            os.makedirs('fragments')
        g.render(os.path.join('fragments', fragment.name))
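
A minimal usage sketch of the class above, assuming logger, conf, tactic and fragmentation_set have been prepared by the caller; the tactic and set names are made up for illustration. A strategy for a particular program would normally subclass FragmentationAlgorythm and override hooks such as _determine_units() and _add_dependencies() rather than use the generic class directly:

strategy = FragmentationAlgorythm(logger, conf, tactic, 'program fragments')
attr_data, fragment_files = strategy.fragmentation(fragmentation_set,
                                                   'generic tactic',
                                                   'default set')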
Exemple #13
0
class Job(klever.core.components.Component):
    CORE_COMPONENTS = [
        'PFG',
        'VTG',
        'VRP'
    ]

    def __init__(self, conf, logger, parent_id, callbacks, mqs, vals, id=None, work_dir=None, attrs=None,
                 separate_from_parent=True, include_child_resources=False, components_common_conf=None):
        super(Job, self).__init__(conf, logger, parent_id, callbacks, mqs, vals, id, work_dir, attrs,
                                  separate_from_parent, include_child_resources)
        self.common_components_conf = components_common_conf

        if work_dir:
            self.common_components_conf['additional sources directory'] = os.path.join(os.path.realpath(work_dir),
                                                                                       'additional sources')

        self.clade = None
        self.components = []
        self.component_processes = []

    def decide_job_or_sub_job(self):
        self.logger.info('Decide job/sub-job "{0}"'.format(self.id))

        # This is required to associate verification results with particular sub-jobs.
        # Skip leading "/" since this identifier is used in os.path.join() that returns absolute path otherwise.
        self.common_components_conf['sub-job identifier'] = self.id[1:]

        self.logger.info('Get specifications set')
        if 'specifications set' in self.common_components_conf:
            spec_set = self.common_components_conf['specifications set']
        else:
            raise KeyError('Specify attribute "specifications set" within job.json')
        self.logger.debug('Specifications set is "{0}"'.format(spec_set))

        # Check that specifications set is supported.
        with open(self.common_components_conf['specifications base'], encoding='utf-8') as fp:
            req_spec_base = json.load(fp)
        spec_set = self.common_components_conf['specifications set']
        if spec_set not in req_spec_base['specification sets']:
            raise ValueError("Klever does not support specifications set {!r} yet, available options are: {}"
                             .format(spec_set, ', '.join(req_spec_base['specification sets'])))

        # Check and set build base here since many Core components need it.
        self.__set_build_base()
        self.clade = Clade(self.common_components_conf['build base'])
        if not self.clade.work_dir_ok():
            raise RuntimeError(f'Build base "{self.common_components_conf["build base"]}" is not OK')

        self.__retrieve_working_src_trees()
        self.__get_original_sources_basic_info()
        self.__upload_original_sources()

        # Create directory where files will be cached and remember absolute path to it for components.
        os.mkdir('cache')
        self.common_components_conf['cache directory'] = os.path.realpath('cache')

        if self.common_components_conf['keep intermediate files']:
            self.logger.debug('Create components configuration file "conf.json"')
            with open('conf.json', 'w', encoding='utf-8') as fp:
                json.dump(self.common_components_conf, fp, ensure_ascii=False, sort_keys=True, indent=4)

        self.__get_job_or_sub_job_components()
        self.callbacks = klever.core.components.get_component_callbacks(self.logger, [type(self)] + self.components)
        self.launch_sub_job_components()

        self.clean_dir = True
        self.logger.info("All components finished")
        if self.conf.get('collect total code coverage', None):
            self.logger.debug('Waiting for coverage collection to finish')
            while not self.vals['coverage_finished'].get(self.common_components_conf['sub-job identifier'], True):
                time.sleep(1)
            self.logger.debug("Coverage collected")

    main = decide_job_or_sub_job

    def __set_build_base(self):
        if 'build base' not in self.common_components_conf:
            raise KeyError("Provide 'build base' configuration option to start verification")

        common_advice = 'please, fix "job.json" (attribute "build base")'
        common_advice += ' or/and deployment configuration file (attribute "Klever Build Bases")'

        # Try to find the specified build base either in the normal way or additionally in the directory "build bases"
        # that is convenient to use when working with many build bases.
        try:
            build_base = klever.core.utils.find_file_or_dir(self.logger,
                                                            self.common_components_conf['main working directory'],
                                                            self.common_components_conf['build base'])
        except FileNotFoundError as e:
            self.logger.warning('Failed to find build base:\n{}'.format(traceback.format_exc().rstrip()))
            try:
                build_base = klever.core.utils.find_file_or_dir(
                    self.logger, self.common_components_conf['main working directory'],
                    os.path.join('build bases', self.common_components_conf['build base']))
            except FileNotFoundError as e:
                self.logger.warning('Failed to find build base:\n{}'.format(traceback.format_exc().rstrip()))
                raise FileNotFoundError(
                    'Specified build base "{0}" does not exist, {1}'.format(self.common_components_conf['build base'],
                                                                            common_advice)) from None

        # Extract build base from archive. There should not be any intermediate directories in archives.
        if os.path.isfile(build_base) and (tarfile.is_tarfile(build_base) or zipfile.is_zipfile(build_base)):
            if tarfile.is_tarfile(build_base):
                self.logger.debug('Build base "{0}" is provided in form of TAR archive'.format(build_base))
                with tarfile.open(build_base) as tfp:
                    tfp.extractall('build base')
            else:
                self.logger.debug('Build base "{0}" is provided in form of ZIP archive'.format(build_base))
                with zipfile.ZipFile(build_base) as zfp:
                    zfp.extractall('build base')

            # Directory contains extracted build base.
            extracted_from = ' extracted from "{0}"'.format(os.path.realpath(build_base))
            build_base = 'build base'
        else:
            extracted_from = ''

        # We need to specify absolute path to build base since it will be used in different Klever components. Besides,
        # this simplifies troubleshooting.
        build_base = os.path.realpath(build_base)

        # TODO: fix after https://github.com/17451k/clade/issues/108.
        if not os.path.isdir(build_base):
            raise FileExistsError('Build base "{0}"{1} is not a directory, {2}'
                                  .format(build_base, extracted_from, common_advice))

        if not os.path.isfile(os.path.join(build_base, 'meta.json')):
            raise FileExistsError(
                'Directory "{0}"{1} is not a build base since it does not contain file "meta.json", {2}'
                .format(build_base, extracted_from, common_advice))

        self.common_components_conf['build base'] = build_base

        self.logger.debug('Klever components will use build base "{0}"'
                          .format(self.common_components_conf['build base']))

    # Klever will try to cut off either working source trees (if specified) or maximum common paths of CC/CL input files
    # and LD/Link output files (otherwise) from referred file names. Sometimes this is rather optional, like for source
    # files referred to by error traces, but, say, for program fragment identifiers this is strictly necessary, since
    # otherwise expert assessment will not work as expected.
    def __retrieve_working_src_trees(self):
        clade_meta = self.clade.get_meta()

        # It is best if users specify working source trees in build bases manually themselves. This is the most
        # accurate approach.
        if 'working source trees' in clade_meta:
            work_src_trees = clade_meta['working source trees']
        # Otherwise try to find them out automatically as described above.
        else:
            in_files = []
            for cmd in self.clade.get_all_cmds_by_type("CC") + self.clade.get_all_cmds_by_type("CL"):
                if cmd['in']:
                    for in_file in cmd['in']:
                        # Sometimes some auxiliary stuff is built in addition to normal C source files that are most
                        # likely located in a place we would like to get.
                        if not in_file.startswith('/tmp') and in_file != '/dev/null':
                            in_files.append(os.path.join(cmd['cwd'], in_file))
            in_files_prefix = os.path.dirname(os.path.commonprefix(in_files))
            self.logger.info('Common prefix of CC/CL input files is "{0}"'.format(in_files_prefix))

            out_files = []
            for cmd in self.clade.get_all_cmds_by_type("LD") + self.clade.get_all_cmds_by_type("Link"):
                if cmd['out']:
                    for out_file in cmd['out']:
                        # Like above.
                        if not out_file.startswith('/tmp') and out_file != '/dev/null':
                            out_files.append(os.path.join(cmd['cwd'], out_file))
            out_files_prefix = os.path.dirname(os.path.commonprefix(out_files))
            self.logger.info('Common prefix of LD/Link output files is "{0}"'.format(out_files_prefix))

            # Meaningful paths look like "/dir...".
            meaningful_paths = []
            for path in (in_files_prefix, out_files_prefix):
                if path and path != os.path.sep and path not in meaningful_paths:
                    meaningful_paths.append(path)

            if meaningful_paths:
                work_src_trees = meaningful_paths
            # At least consider build directory as working source tree if the automatic procedure fails.
            else:
                self.logger.warning(
                    'Consider build directory "{0}" as working source tree. '
                    'This may be dangerous and we recommend specifying appropriate working source trees manually!'
                    .format(clade_meta['build_dir']))
                work_src_trees = [clade_meta['build_dir']]

        # Consider the minimal path if it is a common prefix of the other ones. For instance, if we have "/dir1/dir2" and
        # "/dir1", then "/dir1" will become the only working source tree.
        if len(work_src_trees) > 1:
            min_work_src_tree = min(work_src_trees)
            if os.path.commonprefix(work_src_trees) == min_work_src_tree:
                work_src_trees = [min_work_src_tree]

        self.logger.info(
            'Working source trees to be used are as follows:\n{0}'
            .format('\n'.join(['  {0}'.format(t) for t in work_src_trees])))
        self.common_components_conf['working source trees'] = work_src_trees

    def __refer_original_sources(self, src_id):
        klever.core.utils.report(
            self.logger,
            'patch',
            {
                'identifier': self.id,
                'original_sources': src_id
            },
            self.mqs['report files'],
            self.vals['report id'],
            self.conf['main working directory']
        )

    def __process_source_files(self):
        for file_name in self.clade.src_info:
            self.mqs['file names'].put(file_name)

        for i in range(self.workers_num):
            self.mqs['file names'].put(None)

    def __process_source_file(self):
        while True:
            file_name = self.mqs['file names'].get()

            if not file_name:
                return

            src_file_name = klever.core.utils.make_relative_path(self.common_components_conf['working source trees'],
                                                                 file_name)

            if src_file_name != file_name:
                src_file_name = os.path.join('source files', src_file_name)

            new_file_name = os.path.join('original sources', src_file_name.lstrip(os.path.sep))
            os.makedirs(os.path.dirname(new_file_name), exist_ok=True)
            shutil.copy(self.clade.get_storage_path(file_name), new_file_name)

            cross_refs = CrossRefs(self.common_components_conf, self.logger, self.clade,
                                   file_name, new_file_name,
                                   self.common_components_conf['working source trees'], 'source files')
            cross_refs.get_cross_refs()

    def __get_original_sources_basic_info(self):
        self.logger.info('Get information on original sources for subsequent visualization of uncovered source files')

        # For each source file we need to know the total number of lines and places where functions are defined.
        src_files_info = dict()
        for file_name, file_size in self.clade.src_info.items():
            src_file_name = klever.core.utils.make_relative_path(self.common_components_conf['working source trees'],
                                                                 file_name)

            # Skip non-source files.
            if src_file_name == file_name:
                continue

            src_file_name = os.path.join('source files', src_file_name)

            src_files_info[src_file_name] = list()

            # Store source file size.
            src_files_info[src_file_name].append(file_size['loc'])

            # Store source file function definition lines.
            func_def_lines = list()
            funcs = self.clade.get_functions_by_file([file_name], False)

            if funcs:
                for func_name, func_info in list(funcs.values())[0].items():
                    func_def_lines.append(int(func_info['line']))

            src_files_info[src_file_name].append(sorted(func_def_lines))

        # Dump obtained information (huge data!) to load it when reporting total code coverage if everything goes okay.
        with open('original sources basic information.json', 'w') as fp:
            klever.core.utils.json_dump(src_files_info, fp, self.conf['keep intermediate files'])

    def __upload_original_sources(self):
        # Use the Clade UUID to distinguish various original sources. This works well since this UUID is generated by uuid.uuid4().
        src_id = self.clade.get_uuid()
        # In addition, take into account the meta content as we often change it manually. In this case it may be
        # necessary to re-index the build base. It is not clear whether this is actually the case, so do this in case of
        # any changes in meta.
        src_id += '-' + klever.core.utils.get_file_name_checksum(json.dumps(self.clade.get_meta()))[:12]

        session = klever.core.session.Session(self.logger, self.conf['Klever Bridge'], self.conf['identifier'])

        if session.check_original_sources(src_id):
            self.logger.info('Original sources were uploaded already')
            self.__refer_original_sources(src_id)
            return

        self.logger.info(
            'Cut off working source trees or build directory from original source file names and convert index data')
        os.makedirs('original sources')
        self.mqs['file names'] = multiprocessing.Queue()
        self.workers_num = klever.core.utils.get_parallel_threads_num(self.logger, self.conf)
        subcomponents = [('PSFS', self.__process_source_files)]
        for i in range(self.workers_num):
            subcomponents.append(('PSF', self.__process_source_file))
        self.launch_subcomponents(False, *subcomponents)
        self.mqs['file names'].close()

        self.logger.info('Compress original sources')
        klever.core.utils.ArchiveFiles(['original sources']).make_archive('original sources.zip')

        self.logger.info('Upload original sources')
        try:
            session.upload_original_sources(src_id, 'original sources.zip')
        # Do not fail if the original sources were uploaded already. There may be complex data races because checking and
        # uploading the original sources archive are not atomic.
        except klever.core.session.BridgeError:
            if "original sources with this identifier already exists." not in list(session.error.values())[0]:
                raise

        self.__refer_original_sources(src_id)

        if not self.conf['keep intermediate files']:
            shutil.rmtree('original sources')
            os.remove('original sources.zip')

    def __get_job_or_sub_job_components(self):
        self.logger.info('Get components for sub-job "{0}"'.format(self.id))

        self.components = [getattr(importlib.import_module('.{0}'.format(component.lower()), 'klever.core'), component)
                           for component in self.CORE_COMPONENTS]

        self.logger.debug('Components to be launched: "{0}"'.format(
            ', '.join([component.__name__ for component in self.components])))

    def launch_sub_job_components(self):
        """Has callbacks"""
        self.logger.info('Launch components for sub-job "{0}"'.format(self.id))

        for component in self.components:
            p = component(self.common_components_conf, self.logger, self.id, self.callbacks, self.mqs,
                          self.vals, separate_from_parent=True)
            self.component_processes.append(p)

        klever.core.components.launch_workers(self.logger, self.component_processes)
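
A standalone sketch of the working source tree detection performed by __retrieve_working_src_trees() above: take the directory part of the common prefix of CC/CL input files and of LD/Link output files, keep only meaningful (non-root) paths and collapse them to the minimal one when it is a common prefix of the rest. The helper name and the sample paths are made up for illustration:

import os

def guess_working_src_trees(in_files, out_files):
    # Compute the directory part of the common prefix for each file list.
    prefixes = []
    for paths in (in_files, out_files):
        prefix = os.path.dirname(os.path.commonprefix(paths)) if paths else ''
        # Meaningful paths look like "/dir...", not "" or "/".
        if prefix and prefix != os.path.sep and prefix not in prefixes:
            prefixes.append(prefix)

    # Keep only the minimal path if it is a common prefix of the other ones.
    if len(prefixes) > 1:
        min_prefix = min(prefixes)
        if os.path.commonprefix(prefixes) == min_prefix:
            prefixes = [min_prefix]

    return prefixes

# guess_working_src_trees(['/src/kernel/a.c', '/src/kernel/b.c'], ['/src/vmlinux'])
# returns ['/src'].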
Exemple #14
0
    def weave(self):
        self.abstract_task_desc.setdefault('extra C files', dict())

        search_dirs = klever.core.utils.get_search_dirs(
            self.conf['main working directory'], abs_paths=True)

        clade = Clade(self.conf['build base'])
        if not clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')
        clade_meta = clade.get_meta()

        env = dict(os.environ)
        # Print stubs instead of inline Assembler since verifiers do not interpret it and even can fail.
        env['LDV_INLINE_ASM_STUB'] = ''
        # Get rid of all type qualifiers that are most likely useless for verification but break generation and/or
        # solution of verification tasks from time to time.
        env['LDV_C_BACKEND_OMIT_TYPE_QUALS'] = "1"

        # It would be better to enable it in the development mode, but there is no specific marker for it, so let's
        # use keeping intermediate files as an indicator.
        if self.conf['keep intermediate files']:
            env['LDV_PRINT_SIGNATURE_OF_MATCHED_BY_NAME'] = "1"

        # Put all extra CC descriptions into the queue prior to launching parallel workers.
        self.extra_ccs = []
        for grp in self.abstract_task_desc['grps']:
            self.logger.info('Weave in C files of group "{0}"'.format(
                grp['id']))

            for extra_cc in grp['Extra CCs']:
                self.extra_ccs.append((grp['id'], extra_cc))

        extra_cc_indexes_queue = multiprocessing.Queue()
        for i in range(len(self.extra_ccs)):
            extra_cc_indexes_queue.put(i)

        extra_cc_indexes_queue.put(None)

        self.logger.info('Start Weaver pool of workers')

        # Here workers will put their results, namely, paths to extra C files.
        vals = {'extra C files': multiprocessing.Manager().list()}

        # Lock to mutually exclude Weaver workers from each other.
        lock = multiprocessing.Manager().Lock()

        def constructor(extra_cc_index):
            weaver_worker = WeaverWorker(
                self.conf,
                self.logger,
                self.id,
                self.callbacks,
                self.mqs,
                vals,
                id=str(extra_cc_index),
                separate_from_parent=False,
                include_child_resources=True,
                search_dirs=search_dirs,
                clade=clade,
                clade_meta=clade_meta,
                env=env,
                grp_id=self.extra_ccs[extra_cc_index][0],
                extra_cc=self.extra_ccs[extra_cc_index][1],
                lock=lock)

            return weaver_worker

        workers_num = klever.core.utils.get_parallel_threads_num(
            self.logger, self.conf, 'Weaving')
        if klever.core.components.launch_queue_workers(self.logger,
                                                       extra_cc_indexes_queue,
                                                       constructor,
                                                       workers_num,
                                                       fail_tolerant=True):
            # One of the Weaver workers has failed. We cannot set fail_tolerant to False above since if one of the Weaver
            # workers fails, killing the other ones may result in invalid, infinitely locked cache entries. This can result
            # in deadlocks for other verification tasks (other groups of Weaver workers) that will expect that somebody
            # will fill these cache entries sooner or later. There were no such issues when Weaver operated
            # sequentially.
            # Raising SystemExit allows avoiding useless stack traces in Unknown reports of Weaver.
            raise SystemExit

        self.abstract_task_desc['extra C files'] = list(vals['extra C files'])
        extra_cc_indexes_queue.close()

        # For auxiliary files there are no cross references since it is rather hard to get them from Aspectator, but
        # there is still highlighting.
        if self.conf['code coverage details'] == 'All source files':
            for aux_file in glob.glob('*.aux'):
                new_file = os.path.join(
                    'additional sources', 'generated models',
                    os.path.relpath(aux_file,
                                    self.conf['main working directory']))

                os.makedirs(os.path.dirname(new_file), exist_ok=True)
                shutil.copy(aux_file, new_file)

                cross_refs = CrossRefs(self.conf, self.logger, clade, aux_file,
                                       new_file, search_dirs)
                cross_refs.get_cross_refs()

        self.abstract_task_desc['additional sources'] = os.path.relpath('additional sources',
                                                                        self.conf['main working directory']) \
            if os.path.isdir('additional sources') else None

        # Copy additional sources for total code coverage.
        if self.conf['code coverage details'] != 'Original C source files':
            with klever.core.utils.Cd('additional sources'):
                for root, dirs, files in os.walk(os.path.curdir):
                    for file in files:
                        # These files are handled below in addition to corresponding source files.
                        if file.endswith('.json'):
                            continue

                        if self.conf['code coverage details'] == 'C source files including models' \
                                and not file.endswith('.c'):
                            continue

                        file = os.path.join(root, file)
                        new_file = os.path.join(
                            self.conf['additional sources directory'], file)
                        os.makedirs(os.path.dirname(new_file), exist_ok=True)

                        with klever.core.utils.LockedOpen(
                                new_file + '.tmp', 'w'):
                            if os.path.isfile(new_file):
                                # It looks weird but sometimes that file may not exist. Silently ignore that case.
                                try:
                                    os.remove(new_file + '.tmp')
                                except OSError:
                                    pass

                                continue

                            shutil.copy(file, new_file)
                            shutil.copy(file + '.idx.json',
                                        new_file + '.idx.json')

                            os.remove(new_file + '.tmp')

        # These sections won't be referred to any more.
        del self.abstract_task_desc['grps']
        del self.abstract_task_desc['deps']
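
A plain multiprocessing sketch of the queue-based worker pattern used by weave() above: indexes of work items are put into a queue, a single None sentinel stops the workers, and results are gathered in a shared manager list. This is only a simplified stand-in for klever.core.components.launch_queue_workers(), not its actual implementation:

import multiprocessing

def worker(queue, results):
    while True:
        index = queue.get()
        if index is None:
            # Put the sentinel back so that the remaining workers stop too.
            queue.put(None)
            return
        # A real worker would weave in the extra CC with the given index here.
        results.append('woven file for item {}'.format(index))

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    manager = multiprocessing.Manager()
    results = manager.list()

    for i in range(10):
        queue.put(i)
    queue.put(None)

    workers = [multiprocessing.Process(target=worker, args=(queue, results))
               for _ in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()

    print(list(results))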
Exemple #15
0
class RP(klever.core.components.Component):
    def __init__(self,
                 conf,
                 logger,
                 parent_id,
                 callbacks,
                 mqs,
                 vals,
                 id=None,
                 work_dir=None,
                 attrs=None,
                 separate_from_parent=False,
                 include_child_resources=False,
                 qos_resource_limits=None,
                 source_paths=None,
                 element=None):
        # Read this in a callback
        self.element = element
        self.verdict = None
        self.req_spec_id = None
        self.program_fragment_id = None
        self.task_error = None
        self.source_paths = source_paths
        self.__exception = None
        self.__qos_resource_limit = qos_resource_limits
        # Common initialization
        super(RP, self).__init__(conf, logger, parent_id, callbacks, mqs, vals,
                                 id, work_dir, attrs, separate_from_parent,
                                 include_child_resources)

        self.clean_dir = True
        self.session = klever.core.session.Session(self.logger,
                                                   self.conf['Klever Bridge'],
                                                   self.conf['identifier'])

        # Obtain file prefixes that can be removed from file paths.
        self.clade = Clade(self.conf['build base'])
        if not self.clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')

        self.search_dirs = klever.core.utils.get_search_dirs(
            self.conf['main working directory'], abs_paths=True)

    def fetcher(self):
        self.logger.info("VRP instance is ready to work")
        element = self.element
        status, data = element
        task_id, opts, program_fragment_desc, req_spec_id, verifier, additional_srcs, verification_task_files = data
        self.program_fragment_id = program_fragment_desc['id']
        self.req_spec_id = req_spec_id
        self.results_key = '{}:{}'.format(self.program_fragment_id,
                                          self.req_spec_id)
        self.additional_srcs = additional_srcs
        self.verification_task_files = verification_task_files
        self.logger.debug("Process results of task {}".format(task_id))

        files_list_file = 'files list.txt'
        klever.core.utils.save_program_fragment_description(
            program_fragment_desc, files_list_file)
        klever.core.utils.report(self.logger,
                                 'patch', {
                                     'identifier':
                                     self.id,
                                     'attrs': [{
                                         "name": "Program fragment",
                                         "value": self.program_fragment_id,
                                         "data": files_list_file,
                                         "compare": True,
                                         "associate": True
                                     }, {
                                         "name": "Requirements specification",
                                         "value": req_spec_id,
                                         "compare": True,
                                         "associate": True
                                     }]
                                 },
                                 self.mqs['report files'],
                                 self.vals['report id'],
                                 self.conf['main working directory'],
                                 data_files=[files_list_file])

        # Update solution status
        data = list(self.vals['task solution triples'][self.results_key])
        data[0] = status
        self.vals['task solution triples'][self.results_key] = data

        try:
            if status == 'finished':
                self.process_finished_task(task_id, opts, verifier)
                # Raise exception just here since the method above has callbacks.
                if self.__exception:
                    self.logger.warning("Raising the saved exception")
                    raise self.__exception
            elif status == 'error':
                self.process_failed_task(task_id)
                # Raise exception just here since the method above has callbacks.
                raise RuntimeError(
                    'Failed to decide verification task: {0}'.format(
                        self.task_error))
            else:
                raise ValueError("Unknown task {!r} status {!r}".format(
                    task_id, status))
        finally:
            self.session.sign_out()

    main = fetcher

    def process_witness(self, witness):
        error_trace, attrs = import_error_trace(self.logger, witness,
                                                self.verification_task_files)
        trimmed_file_names = self.__trim_file_names(error_trace['files'])
        error_trace['files'] = [
            trimmed_file_names[file] for file in error_trace['files']
        ]

        # Distinguish multiple witnesses and error traces by using artificial unique identifiers encoded within witness
        # file names.
        match = re.search(r'witness\.(.+)\.graphml', witness)
        if match:
            error_trace_file = 'error trace {0}.json'.format(match.group(1))
        else:
            error_trace_file = 'error trace.json'

        self.logger.info(
            'Write processed witness to "{0}"'.format(error_trace_file))
        with open(error_trace_file, 'w', encoding='utf8') as fp:
            klever.core.utils.json_dump(error_trace, fp,
                                        self.conf['keep intermediate files'])

        return error_trace_file, attrs

    def report_unsafe(self, error_trace_file, attrs):
        klever.core.utils.report(
            self.logger, 'unsafe', {
                'parent':
                "{}/verification".format(self.id),
                'attrs':
                attrs,
                'error_trace':
                klever.core.utils.ArchiveFiles(
                    [error_trace_file],
                    arcnames={error_trace_file: 'error trace.json'})
            }, self.mqs['report files'], self.vals['report id'],
            self.conf['main working directory'])

    def process_single_verdict(self, decision_results, opts, log_file):
        """The function has a callback that collects verdicts to compare them with the ideal ones."""
        # Parse reports and determine status
        benchexec_reports = glob.glob(os.path.join('output', '*.results.xml'))
        if len(benchexec_reports) != 1:
            raise FileNotFoundError(
                'Expect strictly single BenchExec XML report file, but found {}'
                .format(len(benchexec_reports)))

        # Expect single report file
        with open(benchexec_reports[0], encoding="utf8") as fp:
            result = ElementTree.parse(fp).getroot()

            run = result.findall("run")[0]
            for column in run.iter("column"):
                name, value = [
                    column.attrib.get(name) for name in ("title", "value")
                ]
                if name == "status":
                    decision_results["status"] = value

        # Check that we have set status
        if "status" not in decision_results:
            raise KeyError(
                "There is no solution status in BenchExec XML report")

        self.logger.info('Verification task decision status is "{0}"'.format(
            decision_results['status']))

        # Do not fail immediately in case of witness processing failures that often take place. Otherwise we will
        # not upload all witnesses that can be properly processed as well as information on all such failures.
        # The necessary verification finish report also won't be uploaded, causing Bridge to corrupt the whole job.
        if re.search('true', decision_results['status']):
            klever.core.utils.report(
                self.logger,
                'safe',
                {
                    'parent': "{}/verification".format(self.id),
                    'attrs': []
                    # TODO: at the moment it is unclear what are verifier proofs.
                    # 'proof': None
                },
                self.mqs['report files'],
                self.vals['report id'],
                self.conf['main working directory'])
            self.verdict = 'safe'
        else:
            witnesses = glob.glob(os.path.join('output', 'witness.*.graphml'))
            self.logger.info("Found {} witnesses".format(len(witnesses)))

            # Create unsafe reports independently of the status. Later we will create an unknown report in addition if
            # the status is not "unsafe".
            if "expect several witnesses" in opts and opts[
                    "expect several witnesses"] and len(witnesses) != 0:
                self.verdict = 'unsafe'
                for witness in witnesses:
                    try:
                        error_trace_file, attrs = self.process_witness(witness)
                        self.report_unsafe(error_trace_file, attrs)
                    except Exception as e:
                        self.logger.warning(
                            'Failed to process a witness:\n{}'.format(
                                traceback.format_exc().rstrip()))
                        self.verdict = 'non-verifier unknown'

                        if self.__exception:
                            try:
                                raise e from self.__exception
                            except Exception as e:
                                self.__exception = e
                        else:
                            self.__exception = e
            if re.search('false', decision_results['status']) and \
                    ("expect several witnesses" not in opts or not opts["expect several witnesses"]):
                self.verdict = 'unsafe'
                try:
                    if len(witnesses) != 1:
                        raise NotImplementedError(
                            'Just one witness is supported (but "{0}" are given)'
                            .format(len(witnesses)))

                    error_trace_file, attrs = self.process_witness(
                        witnesses[0])
                    self.report_unsafe(error_trace_file, attrs)
                except Exception as e:
                    self.logger.warning(
                        'Failed to process a witness:\n{}'.format(
                            traceback.format_exc().rstrip()))
                    self.verdict = 'non-verifier unknown'
                    self.__exception = e
            elif not re.search('false', decision_results['status']):
                self.verdict = 'unknown'

                # Prepare file to send it with unknown report.
                os.mkdir('verification')
                verification_problem_desc = os.path.join(
                    'verification', 'problem desc.txt')

                # Check resource limitations
                if decision_results['status'] in ('OUT OF MEMORY', 'TIMEOUT'):
                    if decision_results['status'] == 'OUT OF MEMORY':
                        msg = "memory exhausted"
                    else:
                        msg = "CPU time exhausted"

                    with open(verification_problem_desc, 'w',
                              encoding='utf8') as fp:
                        fp.write(msg)

                    data = list(
                        self.vals['task solution triples'][self.results_key])
                    data[2] = decision_results['status']
                    self.vals['task solution triples'][self.results_key] = data
                else:
                    os.symlink(os.path.relpath(log_file, 'verification'),
                               verification_problem_desc)

                klever.core.utils.report(
                    self.logger, 'unknown', {
                        'parent':
                        "{}/verification".format(self.id),
                        'attrs': [],
                        'problem_description':
                        klever.core.utils.ArchiveFiles(
                            [verification_problem_desc],
                            {verification_problem_desc: 'problem desc.txt'})
                    }, self.mqs['report files'], self.vals['report id'],
                    self.conf['main working directory'], 'verification')

    def process_failed_task(self, task_id):
        """The function has a callback at Job module."""
        self.task_error = self.session.get_task_error(task_id)
        # We do not need task and its files anymore.
        self.session.remove_task(task_id)

        self.verdict = 'non-verifier unknown'

    def process_finished_task(self, task_id, opts, verifier):
        """Function has a callback at Job.py."""
        self.session.download_decision(task_id)

        with zipfile.ZipFile('decision result files.zip') as zfp:
            zfp.extractall()

        with open('decision results.json', encoding='utf8') as fp:
            decision_results = json.load(fp)

        # TODO: specify the computer where the verifier was invoked (this information should be obtained from BenchExec or the VerifierCloud web client).
        log_files_dir = glob.glob(os.path.join('output',
                                               'benchmark*logfiles'))[0]
        log_files = os.listdir(log_files_dir)

        if len(log_files) != 1:
            raise NotImplementedError(
                'Exactly one log file should be outputted (but "{0}" are given)'
                .format(len(log_files)))

        log_file = os.path.join(log_files_dir, log_files[0])

        # Send an initial report
        report = {
            'identifier': "{}/verification".format(self.id),
            'parent': self.id,
            # TODO: replace with something meaningful, e.g. tool name + tool version + tool configuration.
            'attrs': [],
            'component': verifier,
            'wall_time': decision_results['resources']['wall time'],
            'cpu_time': decision_results['resources']['CPU time'],
            'memory': decision_results['resources']['memory size'],
            'original_sources': self.clade.get_uuid()
        }

        if self.additional_srcs:
            report['additional_sources'] = klever.core.utils.ArchiveFiles([
                os.path.join(self.conf['main working directory'],
                             self.additional_srcs)
            ])

        # Get coverage
        coverage_info_dir = os.path.join('total coverages',
                                         self.conf['sub-job identifier'],
                                         self.req_spec_id.replace('/', '-'))
        os.makedirs(os.path.join(self.conf['main working directory'],
                                 coverage_info_dir),
                    exist_ok=True)

        self.coverage_info_file = os.path.join(
            coverage_info_dir,
            "{0}_coverage_info.json".format(task_id.replace('/', '-')))

        # Update solution progress. It is necessary to update the whole list to sync changes
        data = list(self.vals['task solution triples'][self.results_key])
        data[1] = decision_results['resources']
        self.vals['task solution triples'][self.results_key] = data

        if not self.logger.disabled and log_file:
            report['log'] = klever.core.utils.ArchiveFiles(
                [log_file], {log_file: 'log.txt'})

        if self.conf['upload verifier input files']:
            report['task'] = task_id

        # Remember exception and raise it if verdict is not unknown
        exception = None
        if opts['code coverage details'] != "None":
            try:
                LCOV(
                    self.conf, self.logger,
                    os.path.join('output', 'coverage.info'), self.clade,
                    self.source_paths, self.search_dirs,
                    self.conf['main working directory'],
                    opts['code coverage details'],
                    os.path.join(self.conf['main working directory'],
                                 self.coverage_info_file),
                    os.path.join(self.conf['main working directory'],
                                 coverage_info_dir),
                    self.verification_task_files)
            except Exception as err:
                exception = err
            else:
                report['coverage'] = klever.core.utils.ArchiveFiles(
                    ['coverage'])
                self.vals['coverage_finished'][
                    self.conf['sub-job identifier']] = False

        # todo: This should be checked to guarantee that we can reschedule tasks
        klever.core.utils.report(self.logger, 'verification', report,
                                 self.mqs['report files'],
                                 self.vals['report id'],
                                 self.conf['main working directory'])

        try:
            # Submit a verdict
            self.process_single_verdict(decision_results, opts, log_file)
        finally:
            # Submit a closing report
            klever.core.utils.report(self.logger, 'verification finish',
                                     {'identifier': report['identifier']},
                                     self.mqs['report files'],
                                     self.vals['report id'],
                                     self.conf['main working directory'])

        # Check verdict
        if exception and self.verdict != 'unknown':
            raise exception
        elif exception:
            self.logger.exception('Could not parse coverage')

    def __trim_file_names(self, file_names):
        trimmed_file_names = {}

        for file_name in file_names:
            # Remove storage from file names if files were put there.
            storage_file = klever.core.utils.make_relative_path(
                [self.clade.storage_dir], file_name)
            # The caller expects the returned dictionary to map each file name, so let's fill it anyway.
            trimmed_file_names[file_name] = storage_file
            # Try to make paths relative to source paths or standard search directories.
            tmp = klever.core.utils.make_relative_path(self.source_paths,
                                                       storage_file,
                                                       absolutize=True)

            # Append special directory name "source files" when cutting off source file names.
            if tmp != os.path.join(os.path.sep, storage_file):
                trimmed_file_names[file_name] = os.path.join(
                    'source files', tmp)
            else:
                # Like in klever.core.vtg.weaver.Weaver#weave.
                tmp = klever.core.utils.make_relative_path(self.search_dirs,
                                                           storage_file,
                                                           absolutize=True)
                if tmp != os.path.join(os.path.sep, storage_file):
                    if tmp.startswith('specifications'):
                        trimmed_file_names[file_name] = tmp
                    else:
                        trimmed_file_names[file_name] = os.path.join(
                            'generated models', tmp)

        return trimmed_file_names
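
A standalone sketch of how process_single_verdict() above extracts the solution status from a BenchExec XML report: take the first run element and read the column whose title is "status". The XML snippet is a made-up minimal example rather than a real BenchExec report:

from xml.etree import ElementTree

report_xml = """
<result>
  <run name="verification task">
    <column title="status" value="false(unreach-call)"/>
    <column title="cputime" value="12.3s"/>
  </run>
</result>
"""

root = ElementTree.fromstring(report_xml)
run = root.findall("run")[0]

status = None
for column in run.iter("column"):
    if column.attrib.get("title") == "status":
        status = column.attrib.get("value")

print(status)  # -> false(unreach-call)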