Example #1
class Job(klever.core.components.Component):
    CORE_COMPONENTS = [
        'PFG',
        'VTG',
        'VRP'
    ]

    def __init__(self, conf, logger, parent_id, callbacks, mqs, vals, id=None, work_dir=None, attrs=None,
                 separate_from_parent=True, include_child_resources=False, components_common_conf=None):
        super(Job, self).__init__(conf, logger, parent_id, callbacks, mqs, vals, id, work_dir, attrs,
                                  separate_from_parent, include_child_resources)
        self.common_components_conf = components_common_conf

        if work_dir:
            self.common_components_conf['additional sources directory'] = os.path.join(os.path.realpath(work_dir),
                                                                                       'additional sources')

        self.clade = None
        self.components = []
        self.component_processes = []

    def decide_job_or_sub_job(self):
        self.logger.info('Decide job/sub-job "{0}"'.format(self.id))

        # This is required to associate verification results with particular sub-jobs.
        # Skip the leading "/" since this identifier is used in os.path.join(), which otherwise returns an absolute path.
        self.common_components_conf['sub-job identifier'] = self.id[1:]

        self.logger.info('Get specifications set')
        if 'specifications set' in self.common_components_conf:
            spec_set = self.common_components_conf['specifications set']
        else:
            raise KeyError('Specify attribute "specifications set" within job.json')
        self.logger.debug('Specifications set is "{0}"'.format(spec_set))

        # Check that specifications set is supported.
        with open(self.common_components_conf['specifications base'], encoding='utf-8') as fp:
            req_spec_base = json.load(fp)
        if spec_set not in req_spec_base['specification sets']:
            raise ValueError("Klever does not support specifications set {!r} yet, available options are: {}"
                             .format(spec_set, ', '.join(req_spec_base['specification sets'])))

        # Check and set build base here since many Core components need it.
        self.__set_build_base()
        self.clade = Clade(self.common_components_conf['build base'])
        if not self.clade.work_dir_ok():
            raise RuntimeError(f'Build base "{self.common_components_conf["build base"]}" is not OK')

        self.__retrieve_working_src_trees()
        self.__get_original_sources_basic_info()
        self.__upload_original_sources()

        # Create directory where files will be cached and remember absolute path to it for components.
        os.mkdir('cache')
        self.common_components_conf['cache directory'] = os.path.realpath('cache')

        if self.common_components_conf['keep intermediate files']:
            self.logger.debug('Create components configuration file "conf.json"')
            with open('conf.json', 'w', encoding='utf-8') as fp:
                json.dump(self.common_components_conf, fp, ensure_ascii=False, sort_keys=True, indent=4)

        self.__get_job_or_sub_job_components()
        self.callbacks = klever.core.components.get_component_callbacks(self.logger, [type(self)] + self.components)
        self.launch_sub_job_components()

        self.clean_dir = True
        self.logger.info("All components finished")
        if self.conf.get('collect total code coverage', None):
            self.logger.debug('Waiting for coverage collection to finish')
            while not self.vals['coverage_finished'].get(self.common_components_conf['sub-job identifier'], True):
                time.sleep(1)
            self.logger.debug("Coverage collected")

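    # The component framework runs a component by invoking its "main" attribute, so alias the entry point.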
    main = decide_job_or_sub_job

    def __set_build_base(self):
        if 'build base' not in self.common_components_conf:
            raise KeyError("Provide 'build base' configuration option to start verification")

        common_advice = 'please fix "job.json" (attribute "build base")'
        common_advice += ' and/or the deployment configuration file (attribute "Klever Build Bases")'

        # Try to find the specified build base either in the normal way or additionally in directory "build bases",
        # which is convenient when working with many build bases.
        try:
            build_base = klever.core.utils.find_file_or_dir(self.logger,
                                                            self.common_components_conf['main working directory'],
                                                            self.common_components_conf['build base'])
        except FileNotFoundError:
            self.logger.warning('Failed to find build base:\n{}'.format(traceback.format_exc().rstrip()))
            try:
                build_base = klever.core.utils.find_file_or_dir(
                    self.logger, self.common_components_conf['main working directory'],
                    os.path.join('build bases', self.common_components_conf['build base']))
            except FileNotFoundError:
                self.logger.warning('Failed to find build base:\n{}'.format(traceback.format_exc().rstrip()))
                raise FileNotFoundError(
                    'Specified build base "{0}" does not exist, {1}'.format(self.common_components_conf['build base'],
                                                                            common_advice)) from None

        # Extract build base from archive. There should not be any intermediate directories in archives.
        if os.path.isfile(build_base) and (tarfile.is_tarfile(build_base) or zipfile.is_zipfile(build_base)):
            if tarfile.is_tarfile(build_base):
                self.logger.debug('Build base "{0}" is provided in form of TAR archive'.format(build_base))
                with tarfile.open(build_base) as tfp:
                    tfp.extractall('build base')
            else:
                self.logger.debug('Build base "{0}" is provided in form of ZIP archive'.format(build_base))
                with zipfile.ZipFile(build_base) as zfp:
                    zfp.extractall('build base')

            # Directory contains extracted build base.
            extracted_from = ' extracted from "{0}"'.format(os.path.realpath(build_base))
            build_base = 'build base'
        else:
            extracted_from = ''

        # We need to specify the absolute path to the build base since it will be used by different Klever components.
        # Besides, this simplifies troubleshooting.
        build_base = os.path.realpath(build_base)

        # TODO: fix after https://github.com/17451k/clade/issues/108.
        if not os.path.isdir(build_base):
            raise FileExistsError('Build base "{0}"{1} is not a directory, {2}'
                                  .format(build_base, extracted_from, common_advice))

        if not os.path.isfile(os.path.join(build_base, 'meta.json')):
            raise FileExistsError(
                'Directory "{0}"{1} is not a build base since it does not contain file "meta.json", {2}'
                .format(build_base, extracted_from, common_advice))

        self.common_components_conf['build base'] = build_base

        self.logger.debug('Klever components will use build base "{0}"'
                          .format(self.common_components_conf['build base']))

    # Klever will try to cut off either working source trees (if specified) or maximum common paths of CC/CL input
    # files and LD/Link output files (otherwise) from referred file names. Sometimes this is rather optional, like for
    # source files referred by error traces, but, say, for program fragment identifiers this is strictly necessary,
    # e.g. because otherwise expert assessment will not work as expected.
    def __retrieve_working_src_trees(self):
        clade_meta = self.clade.get_meta()

        # It is best if users specify working source trees manually in build bases themselves since that is the most
        # accurate approach.
        if 'working source trees' in clade_meta:
            work_src_trees = clade_meta['working source trees']
        # Otherwise try to find them out automatically as described above.
        else:
            in_files = []
            for cmd in self.clade.get_all_cmds_by_type("CC") + self.clade.get_all_cmds_by_type("CL"):
                if cmd['in']:
                    for in_file in cmd['in']:
                        # Sometimes auxiliary stuff is built in addition to normal C source files, which are most
                        # likely located in the place we would like to find.
                        if not in_file.startswith('/tmp') and in_file != '/dev/null':
                            in_files.append(os.path.join(cmd['cwd'], in_file))
            in_files_prefix = os.path.dirname(os.path.commonprefix(in_files))
            self.logger.info('Common prefix of CC/CL input files is "{0}"'.format(in_files_prefix))

            out_files = []
            for cmd in self.clade.get_all_cmds_by_type("LD") + self.clade.get_all_cmds_by_type("Link"):
                if cmd['out']:
                    for out_file in cmd['out']:
                        # Like above.
                        if not out_file.startswith('/tmp') and out_file != '/dev/null':
                            out_files.append(os.path.join(cmd['cwd'], out_file))
            out_files_prefix = os.path.dirname(os.path.commonprefix(out_files))
            self.logger.info('Common prefix of LD/Link output files is "{0}"'.format(out_files_prefix))

            # Meaningful paths look like "/dir...".
            meaningful_paths = []
            for path in (in_files_prefix, out_files_prefix):
                if path and path != os.path.sep and path not in meaningful_paths:
                    meaningful_paths.append(path)

            if meaningful_paths:
                work_src_trees = meaningful_paths
            # At least consider build directory as working source tree if the automatic procedure fails.
            else:
                self.logger.warning(
                    'Consider build directory "{0}" as working source tree. '
                    'This may be dangerous and we recommend specifying appropriate working source trees manually!'
                    .format(clade_meta['build_dir']))
                work_src_trees = [clade_meta['build_dir']]

        # Consider the minimal path if it is a common prefix of the others. For instance, if we have "/dir1/dir2" and
        # "/dir1", then "/dir1" will become the only working source tree.
        if len(work_src_trees) > 1:
            min_work_src_tree = min(work_src_trees)
            if os.path.commonprefix(work_src_trees) == min_work_src_tree:
                work_src_trees = [min_work_src_tree]

        self.logger.info(
            'Working source trees to be used are as follows:\n{0}'
            .format('\n'.join(['  {0}'.format(t) for t in work_src_trees])))
        self.common_components_conf['working source trees'] = work_src_trees

    def __refer_original_sources(self, src_id):
        klever.core.utils.report(
            self.logger,
            'patch',
            {
                'identifier': self.id,
                'original_sources': src_id
            },
            self.mqs['report files'],
            self.vals['report id'],
            self.conf['main working directory']
        )

    def __process_source_files(self):
        for file_name in self.clade.src_info:
            self.mqs['file names'].put(file_name)

        for i in range(self.workers_num):
            self.mqs['file names'].put(None)

    def __process_source_file(self):
        while True:
            file_name = self.mqs['file names'].get()

            if not file_name:
                return

            src_file_name = klever.core.utils.make_relative_path(self.common_components_conf['working source trees'],
                                                                 file_name)

            if src_file_name != file_name:
                src_file_name = os.path.join('source files', src_file_name)

            new_file_name = os.path.join('original sources', src_file_name.lstrip(os.path.sep))
            os.makedirs(os.path.dirname(new_file_name), exist_ok=True)
            shutil.copy(self.clade.get_storage_path(file_name), new_file_name)

            cross_refs = CrossRefs(self.common_components_conf, self.logger, self.clade,
                                   file_name, new_file_name,
                                   self.common_components_conf['working source trees'], 'source files')
            cross_refs.get_cross_refs()

    def __get_original_sources_basic_info(self):
        self.logger.info('Get information on original sources for subsequent visualization of uncovered source files')

        # For each source file we need to know the total number of lines and places where functions are defined.
        src_files_info = dict()
        for file_name, file_size in self.clade.src_info.items():
            src_file_name = klever.core.utils.make_relative_path(self.common_components_conf['working source trees'],
                                                                 file_name)

            # Skip non-source files.
            if src_file_name == file_name:
                continue

            src_file_name = os.path.join('source files', src_file_name)

            src_files_info[src_file_name] = list()

            # Store source file size.
            src_files_info[src_file_name].append(file_size['loc'])

            # Store source file function definition lines.
            func_def_lines = list()
            funcs = self.clade.get_functions_by_file([file_name], False)

            if funcs:
                for func_name, func_info in list(funcs.values())[0].items():
                    func_def_lines.append(int(func_info['line']))

            src_files_info[src_file_name].append(sorted(func_def_lines))

        # Dump the obtained information (huge data!) to load it when reporting total code coverage if everything goes okay.
        with open('original sources basic information.json', 'w') as fp:
            klever.core.utils.json_dump(src_files_info, fp, self.conf['keep intermediate files'])

    def __upload_original_sources(self):
        # Use the Clade UUID to distinguish various original sources. This works well since that UUID is uuid.uuid4().
        src_id = self.clade.get_uuid()
        # In addition, take the meta content into account since we often change it manually. In that case it may be
        # necessary to re-index the build base. It is not clear whether this is actually the case, so do this upon any
        # changes in the meta.
        src_id += '-' + klever.core.utils.get_file_name_checksum(json.dumps(self.clade.get_meta()))[:12]

        session = klever.core.session.Session(self.logger, self.conf['Klever Bridge'], self.conf['identifier'])

        if session.check_original_sources(src_id):
            self.logger.info('Original sources were uploaded already')
            self.__refer_original_sources(src_id)
            return

        self.logger.info(
            'Cut off working source trees or build directory from original source file names and convert index data')
        os.makedirs('original sources')
        self.mqs['file names'] = multiprocessing.Queue()
        self.workers_num = klever.core.utils.get_parallel_threads_num(self.logger, self.conf)
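        # One producer ("PSFS") feeds file names into the queue while several workers ("PSF") consume them in parallel.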
        subcomponents = [('PSFS', self.__process_source_files)]
        for i in range(self.workers_num):
            subcomponents.append(('PSF', self.__process_source_file))
        self.launch_subcomponents(False, *subcomponents)
        self.mqs['file names'].close()

        self.logger.info('Compress original sources')
        klever.core.utils.ArchiveFiles(['original sources']).make_archive('original sources.zip')

        self.logger.info('Upload original sources')
        try:
            session.upload_original_sources(src_id, 'original sources.zip')
        # Do not fail if the original sources already exist. There may be complex data races since checking and
        # uploading the original sources archive are not atomic.
        except klever.core.session.BridgeError:
            if "original sources with this identifier already exists." not in list(session.error.values())[0]:
                raise

        self.__refer_original_sources(src_id)

        if not self.conf['keep intermediate files']:
            shutil.rmtree('original sources')
            os.remove('original sources.zip')

    def __get_job_or_sub_job_components(self):
        self.logger.info('Get components for sub-job "{0}"'.format(self.id))

        self.components = [getattr(importlib.import_module('.{0}'.format(component.lower()), 'klever.core'), component)
                           for component in self.CORE_COMPONENTS]

        self.logger.debug('Components to be launched: "{0}"'.format(
            ', '.join([component.__name__ for component in self.components])))

    def launch_sub_job_components(self):
        """Has callbacks"""
        self.logger.info('Launch components for sub-job "{0}"'.format(self.id))

        for component in self.components:
            p = component(self.common_components_conf, self.logger, self.id, self.callbacks, self.mqs,
                          self.vals, separate_from_parent=True)
            self.component_processes.append(p)

        klever.core.components.launch_workers(self.logger, self.component_processes)
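
The common-prefix reduction at the end of __retrieve_working_src_trees above is the subtle step: the minimal path may replace all working source trees, but only when it is a common prefix of every one of them. A minimal standalone sketch of just that step (reduce_work_src_trees is a hypothetical helper, not the Klever API):

import os

def reduce_work_src_trees(work_src_trees):
    # "/dir1" subsumes "/dir1/dir2" only when it is a common prefix of all trees.
    if len(work_src_trees) > 1:
        min_work_src_tree = min(work_src_trees)
        if os.path.commonprefix(work_src_trees) == min_work_src_tree:
            return [min_work_src_tree]
    return work_src_trees

assert reduce_work_src_trees(['/dir1/dir2', '/dir1']) == ['/dir1']
assert reduce_work_src_trees(['/dir1', '/dir2']) == ['/dir1', '/dir2']

Note that os.path.commonprefix works on characters, not path components, which is why the method above additionally passes the prefixes of CC/CL input files and LD/Link output files through os.path.dirname.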
Example #2
class Job(klever.core.components.Component):
    CORE_COMPONENTS = ['PFG', 'VTG', 'VRP']

    def __init__(self,
                 conf,
                 logger,
                 parent_id,
                 callbacks,
                 mqs,
                 vals,
                 id=None,
                 work_dir=None,
                 attrs=None,
                 separate_from_parent=True,
                 include_child_resources=False,
                 components_common_conf=None):
        super(Job,
              self).__init__(conf, logger, parent_id, callbacks, mqs, vals, id,
                             work_dir, attrs, separate_from_parent,
                             include_child_resources)
        self.common_components_conf = components_common_conf

        if work_dir:
            self.common_components_conf[
                'additional sources directory'] = os.path.join(
                    os.path.realpath(work_dir), 'additional sources')

        self.clade = None
        self.components = []
        self.component_processes = []

    def decide_job_or_sub_job(self):
        self.logger.info('Decide job/sub-job "{0}"'.format(self.id))

        # This is required to associate verification results with particular sub-jobs.
        # Skip the leading "/" since this identifier is used in os.path.join(), which otherwise returns an absolute path.
        self.common_components_conf['sub-job identifier'] = self.id[1:]

        # Check and set build base here since many Core components need it.
        self.__set_build_base()
        self.clade = Clade(self.common_components_conf['build base'])
        if not self.clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')

        self.__retrieve_working_src_trees()
        self.__get_original_sources_basic_info()
        self.__upload_original_sources()

        # Create directory where files will be cached and remember absolute path to it for components.
        os.mkdir('cache')
        self.common_components_conf['cache directory'] = os.path.realpath(
            'cache')

        if self.common_components_conf['keep intermediate files']:
            self.logger.debug(
                'Create components configuration file "conf.json"')
            with open('conf.json', 'w', encoding='utf8') as fp:
                json.dump(self.common_components_conf,
                          fp,
                          ensure_ascii=False,
                          sort_keys=True,
                          indent=4)

        self.__get_job_or_sub_job_components()
        self.callbacks = klever.core.components.get_component_callbacks(
            self.logger, [type(self)] + self.components)
        self.launch_sub_job_components()

        self.clean_dir = True
        self.logger.info("All components finished")
        if self.conf.get('collect total code coverage', None):
            self.logger.debug('Waiting for coverage collection to finish')
            while not self.vals['coverage_finished'].get(
                    self.common_components_conf['sub-job identifier'], True):
                time.sleep(1)
            self.logger.debug("Coverage collected")

    main = decide_job_or_sub_job

    def __set_build_base(self):
        if 'build base' not in self.common_components_conf:
            raise KeyError(
                "Provide 'build base' configuration option to start verification"
            )

        common_advice = 'please fix "job.json" (attribute "build base")'
        common_advice += ' and/or the deployment configuration file (attribute "Klever Build Bases")'

        # Try to find the specified build base either in the normal way or additionally in directory "build bases",
        # which is convenient when working with many build bases.
        try:
            build_base = klever.core.utils.find_file_or_dir(
                self.logger, os.path.curdir,
                self.common_components_conf['build base'])
        except FileNotFoundError:
            try:
                build_base = klever.core.utils.find_file_or_dir(
                    self.logger, os.path.curdir,
                    os.path.join('build bases',
                                 self.common_components_conf['build base']))
            except FileNotFoundError:
                raise FileNotFoundError(
                    'Specified build base "{0}" does not exist, {1}'.format(
                        self.common_components_conf['build base'],
                        common_advice)) from None

        # Extract build base from archive. There should not be any intermediate directories in archives.
        if os.path.isfile(build_base) and (tarfile.is_tarfile(build_base)
                                           or zipfile.is_zipfile(build_base)):
            if tarfile.is_tarfile(build_base):
                self.logger.debug(
                    'Build base "{0}" is provided in form of TAR archive'.
                    format(build_base))
                with tarfile.open(build_base) as tfp:
                    tfp.extractall('build base')
            else:
                self.logger.debug(
                    'Build base "{0}" is provided in form of ZIP archive'.
                    format(build_base))
                with zipfile.ZipFile(build_base) as zfp:
                    zfp.extractall('build base')

            # Directory contains extracted build base.
            extracted_from = ' extracted from "{0}"'.format(
                os.path.realpath(build_base))
            build_base = 'build base'
        else:
            extracted_from = ''

        # We need to specify the absolute path to the build base since it will be used by different Klever components.
        # Besides, this simplifies troubleshooting.
        build_base = os.path.realpath(build_base)

        # TODO: fix after https://github.com/17451k/clade/issues/108.
        if not os.path.isdir(build_base):
            raise FileExistsError(
                'Build base "{0}"{1} is not a directory, {2}'.format(
                    build_base, extracted_from, common_advice))

        if not os.path.isfile(os.path.join(build_base, 'meta.json')):
            raise FileExistsError(
                'Directory "{0}"{1} is not a build base since it does not contain file "meta.json", {2}'
                .format(build_base, extracted_from, common_advice))

        self.common_components_conf['build base'] = build_base

        self.logger.debug('Klever components will use build base "{0}"'.format(
            self.common_components_conf['build base']))

    # Klever will try to cut off either working source trees (if specified) or at least the build directory (otherwise)
    # from referred file names. Sometimes this is rather optional, like for source files referred by error traces, but,
    # say, for program fragment identifiers this is strictly necessary, e.g. because otherwise expert assessment will
    # not work as expected.
    def __retrieve_working_src_trees(self):
        clade_meta = self.clade.get_meta()
        self.common_components_conf['working source trees'] = clade_meta['working source trees'] \
            if 'working source trees' in clade_meta else [clade_meta['build_dir']]

    def __refer_original_sources(self, src_id):
        klever.core.utils.report(self.logger, 'patch', {
            'identifier': self.id,
            'original_sources': src_id
        }, self.mqs['report files'], self.vals['report id'],
                                 self.conf['main working directory'])

    def __process_source_files(self):
        for file_name in self.clade.src_info:
            self.mqs['file names'].put(file_name)

        for i in range(self.workers_num):
            self.mqs['file names'].put(None)

    def __process_source_file(self):
        while True:
            file_name = self.mqs['file names'].get()

            if not file_name:
                return

            src_file_name = klever.core.utils.make_relative_path(
                self.common_components_conf['working source trees'], file_name)

            if src_file_name != file_name:
                src_file_name = os.path.join('source files', src_file_name)

            new_file_name = os.path.join('original sources',
                                         src_file_name.lstrip(os.path.sep))
            os.makedirs(os.path.dirname(new_file_name), exist_ok=True)
            shutil.copy(self.clade.get_storage_path(file_name), new_file_name)

            cross_refs = CrossRefs(
                self.common_components_conf, self.logger, self.clade,
                file_name, new_file_name,
                self.common_components_conf['working source trees'],
                'source files')
            cross_refs.get_cross_refs()

    def __get_original_sources_basic_info(self):
        self.logger.info(
            'Get information on original sources for subsequent visualization of uncovered source files'
        )

        # For each source file we need to know the total number of lines and places where functions are defined.
        src_files_info = dict()
        for file_name, file_size in self.clade.src_info.items():
            src_file_name = klever.core.utils.make_relative_path(
                self.common_components_conf['working source trees'], file_name)

            # Skip non-source files.
            if src_file_name == file_name:
                continue

            src_file_name = os.path.join('source files', src_file_name)

            src_files_info[src_file_name] = list()

            # Store source file size.
            src_files_info[src_file_name].append(file_size['loc'])

            # Store source file function definition lines.
            func_def_lines = list()
            funcs = self.clade.get_functions_by_file([file_name], False)

            if funcs:
                for func_name, func_info in list(funcs.values())[0].items():
                    func_def_lines.append(int(func_info['line']))

            src_files_info[src_file_name].append(sorted(func_def_lines))

        # Dump the obtained information (huge data!) to load it when reporting total code coverage if everything goes okay.
        with open('original sources basic information.json', 'w') as fp:
            klever.core.utils.json_dump(src_files_info, fp,
                                        self.conf['keep intermediate files'])

    def __upload_original_sources(self):
        # Use the Clade UUID to distinguish various original sources. This works well since that UUID is uuid.uuid4().
        src_id = self.clade.get_uuid()

        session = klever.core.session.Session(self.logger,
                                              self.conf['Klever Bridge'],
                                              self.conf['identifier'])

        if session.check_original_sources(src_id):
            self.logger.info('Original sources were uploaded already')
            self.__refer_original_sources(src_id)
            return

        self.logger.info(
            'Cut off working source trees or build directory from original source file names and convert index data'
        )
        os.makedirs('original sources')
        self.mqs['file names'] = multiprocessing.Queue()
        self.workers_num = klever.core.utils.get_parallel_threads_num(
            self.logger, self.conf)
        subcomponents = [('PSFS', self.__process_source_files)]
        for i in range(self.workers_num):
            subcomponents.append(('RSF', self.__process_source_file))
        self.launch_subcomponents(False, *subcomponents)
        self.mqs['file names'].close()

        self.logger.info('Compress original sources')
        klever.core.utils.ArchiveFiles(['original sources']).make_archive('original sources.zip')

        self.logger.info('Upload original sources')
        try:
            session.upload_original_sources(src_id, 'original sources.zip')
        # Do not fail if the original sources already exist. There may be complex data races since checking and
        # uploading the original sources archive are not atomic.
        except klever.core.session.BridgeError:
            if "original sources with this identifier already exists." not in list(
                    session.error.values())[0]:
                raise

        self.__refer_original_sources(src_id)

        if not self.conf['keep intermediate files']:
            shutil.rmtree('original sources')
            os.remove('original sources.zip')

    def __get_job_or_sub_job_components(self):
        self.logger.info('Get components for sub-job "{0}"'.format(self.id))

        self.components = [
            getattr(
                importlib.import_module('.{0}'.format(component.lower()),
                                        'klever.core'), component)
            for component in self.CORE_COMPONENTS
        ]

        self.logger.debug('Components to be launched: "{0}"'.format(', '.join(
            [component.__name__ for component in self.components])))

    def launch_sub_job_components(self):
        """Has callbacks"""
        self.logger.info('Launch components for sub-job "{0}"'.format(self.id))

        for component in self.components:
            p = component(self.common_components_conf,
                          self.logger,
                          self.id,
                          self.callbacks,
                          self.mqs,
                          self.vals,
                          separate_from_parent=True)
            self.component_processes.append(p)

        klever.core.components.launch_workers(self.logger,
                                              self.component_processes)
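
The build base handling in __set_build_base boils down to two steps: unpack the build base if it arrives as a TAR or ZIP archive, then require a directory that contains Clade's "meta.json". A standalone sketch of those checks (unpack_and_check_build_base is a hypothetical helper, not the Klever API; it mirrors the FileExistsError choice of the original):

import os
import tarfile
import zipfile

def unpack_and_check_build_base(build_base):
    # Extract an archived build base into the "build base" directory.
    if os.path.isfile(build_base) and tarfile.is_tarfile(build_base):
        with tarfile.open(build_base) as tfp:
            tfp.extractall('build base')
        build_base = 'build base'
    elif os.path.isfile(build_base) and zipfile.is_zipfile(build_base):
        with zipfile.ZipFile(build_base) as zfp:
            zfp.extractall('build base')
        build_base = 'build base'

    # Use the absolute path since other components will refer to it.
    build_base = os.path.realpath(build_base)

    # A valid Clade build base is a directory containing "meta.json".
    if not os.path.isdir(build_base):
        raise FileExistsError('Build base "{0}" is not a directory'.format(build_base))
    if not os.path.isfile(os.path.join(build_base, 'meta.json')):
        raise FileExistsError('Directory "{0}" does not contain file "meta.json"'.format(build_base))

    return build_base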
Example #3
class RP(klever.core.components.Component):
    def __init__(self,
                 conf,
                 logger,
                 parent_id,
                 callbacks,
                 mqs,
                 vals,
                 id=None,
                 work_dir=None,
                 attrs=None,
                 separate_from_parent=False,
                 include_child_resources=False,
                 qos_resource_limits=None,
                 source_paths=None,
                 element=None):
        # Read this in a callback
        self.element = element
        self.verdict = None
        self.req_spec_id = None
        self.program_fragment_id = None
        self.task_error = None
        self.source_paths = source_paths
        self.__exception = None
        self.__qos_resource_limit = qos_resource_limits
        # Common initialization
        super(RP, self).__init__(conf, logger, parent_id, callbacks, mqs, vals,
                                 id, work_dir, attrs, separate_from_parent,
                                 include_child_resources)

        self.clean_dir = True
        self.session = klever.core.session.Session(self.logger,
                                                   self.conf['Klever Bridge'],
                                                   self.conf['identifier'])

        # Obtain file prefixes that can be removed from file paths.
        self.clade = Clade(self.conf['build base'])
        if not self.clade.work_dir_ok():
            raise RuntimeError('Build base is not OK')

        self.search_dirs = klever.core.utils.get_search_dirs(
            self.conf['main working directory'], abs_paths=True)

    def fetcher(self):
        self.logger.info("VRP instance is ready to work")
        element = self.element
        status, data = element
        task_id, opts, program_fragment_desc, req_spec_id, verifier, additional_srcs, verification_task_files = data
        self.program_fragment_id = program_fragment_desc['id']
        self.req_spec_id = req_spec_id
        self.results_key = '{}:{}'.format(self.program_fragment_id,
                                          self.req_spec_id)
        self.additional_srcs = additional_srcs
        self.verification_task_files = verification_task_files
        self.logger.debug("Process results of task {}".format(task_id))

        files_list_file = 'files list.txt'
        klever.core.utils.save_program_fragment_description(
            program_fragment_desc, files_list_file)
        klever.core.utils.report(self.logger,
                                 'patch', {
                                     'identifier':
                                     self.id,
                                     'attrs': [{
                                         "name": "Program fragment",
                                         "value": self.program_fragment_id,
                                         "data": files_list_file,
                                         "compare": True,
                                         "associate": True
                                     }, {
                                         "name": "Requirements specification",
                                         "value": req_spec_id,
                                         "compare": True,
                                         "associate": True
                                     }]
                                 },
                                 self.mqs['report files'],
                                 self.vals['report id'],
                                 self.conf['main working directory'],
                                 data_files=[files_list_file])

        # Update solution status
        data = list(self.vals['task solution triples'][self.results_key])
        data[0] = status
        self.vals['task solution triples'][self.results_key] = data

        try:
            if status == 'finished':
                self.process_finished_task(task_id, opts, verifier)
                # Raise exception just here since the method above has callbacks.
                if self.__exception:
                    self.logger.warning("Raising the saved exception")
                    raise self.__exception
            elif status == 'error':
                self.process_failed_task(task_id)
                # Raise exception just here since the method above has callbacks.
                raise RuntimeError(
                    'Failed to decide verification task: {0}'.format(
                        self.task_error))
            else:
                raise ValueError("Unknown task {!r} status {!r}".format(
                    task_id, status))
        finally:
            self.session.sign_out()

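    # As in Job above, the component framework invokes "main"; alias the fetcher accordingly.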
    main = fetcher

    def process_witness(self, witness):
        error_trace, attrs = import_error_trace(self.logger, witness,
                                                self.verification_task_files)
        trimmed_file_names = self.__trim_file_names(error_trace['files'])
        error_trace['files'] = [
            trimmed_file_names[file] for file in error_trace['files']
        ]

        # Distinguish multiple witnesses and error traces by using artificial unique identifiers encoded within witness
        # file names.
        match = re.search(r'witness\.(.+)\.graphml', witness)
        if match:
            error_trace_file = 'error trace {0}.json'.format(match.group(1))
        else:
            error_trace_file = 'error trace.json'

        self.logger.info(
            'Write processed witness to "{0}"'.format(error_trace_file))
        with open(error_trace_file, 'w', encoding='utf8') as fp:
            klever.core.utils.json_dump(error_trace, fp,
                                        self.conf['keep intermediate files'])

        return error_trace_file, attrs

    def report_unsafe(self, error_trace_file, attrs):
        klever.core.utils.report(
            self.logger, 'unsafe', {
                'parent':
                "{}/verification".format(self.id),
                'attrs':
                attrs,
                'error_trace':
                klever.core.utils.ArchiveFiles(
                    [error_trace_file],
                    arcnames={error_trace_file: 'error trace.json'})
            }, self.mqs['report files'], self.vals['report id'],
            self.conf['main working directory'])

    def process_single_verdict(self, decision_results, opts, log_file):
        """The function has a callback that collects verdicts to compare them with the ideal ones."""
        # Parse reports and determine status
        benchexec_reports = glob.glob(os.path.join('output', '*.results.xml'))
        if len(benchexec_reports) != 1:
            raise FileNotFoundError(
                'Expected exactly one BenchExec XML report file, but found {}'
                .format(len(benchexec_reports)))

        # Expect single report file
        with open(benchexec_reports[0], encoding="utf8") as fp:
            result = ElementTree.parse(fp).getroot()

            run = result.findall("run")[0]
            for column in run.iter("column"):
                name, value = [
                    column.attrib.get(name) for name in ("title", "value")
                ]
                if name == "status":
                    decision_results["status"] = value

        # Check that we have set status
        if "status" not in decision_results:
            raise KeyError(
                "There is no solution status in BenchExec XML report")

        self.logger.info('Verification task decision status is "{0}"'.format(
            decision_results['status']))

        # Do not fail immediately in case of witness processing failures, which often take place. Otherwise we will
        # not upload all witnesses that can be properly processed as well as information on all such failures. The
        # necessary verification finish report also won't be uploaded, causing Bridge to corrupt the whole job.
        if re.search('true', decision_results['status']):
            klever.core.utils.report(
                self.logger,
                'safe',
                {
                    'parent': "{}/verification".format(self.id),
                    'attrs': []
                    # TODO: at the moment it is unclear what are verifier proofs.
                    # 'proof': None
                },
                self.mqs['report files'],
                self.vals['report id'],
                self.conf['main working directory'])
            self.verdict = 'safe'
        else:
            witnesses = glob.glob(os.path.join('output', 'witness.*.graphml'))
            self.logger.info("Found {} witnesses".format(len(witnesses)))

            # Create unsafe reports independently of status. Later we will create an unknown report in addition if
            # status is not "unsafe".
            if "expect several witnesses" in opts and opts[
                    "expect several witnesses"] and len(witnesses) != 0:
                self.verdict = 'unsafe'
                for witness in witnesses:
                    try:
                        error_trace_file, attrs = self.process_witness(witness)
                        self.report_unsafe(error_trace_file, attrs)
                    except Exception as e:
                        self.logger.warning(
                            'Failed to process a witness:\n{}'.format(
                                traceback.format_exc().rstrip()))
                        self.verdict = 'non-verifier unknown'

                        if self.__exception:
                            try:
                                raise e from self.__exception
                            except Exception as e:
                                self.__exception = e
                        else:
                            self.__exception = e
            if re.search('false', decision_results['status']) and \
                    not opts.get("expect several witnesses"):
                self.verdict = 'unsafe'
                try:
                    if len(witnesses) != 1:
                        raise NotImplementedError(
                            'Just one witness is supported (but "{0}" are given)'
                            .format(len(witnesses)))

                    error_trace_file, attrs = self.process_witness(
                        witnesses[0])
                    self.report_unsafe(error_trace_file, attrs)
                except Exception as e:
                    self.logger.warning(
                        'Failed to process a witness:\n{}'.format(
                            traceback.format_exc().rstrip()))
                    self.verdict = 'non-verifier unknown'
                    self.__exception = e
            elif not re.search('false', decision_results['status']):
                self.verdict = 'unknown'

                # Prepare file to send it with unknown report.
                os.mkdir('verification')
                verification_problem_desc = os.path.join(
                    'verification', 'problem desc.txt')

                # Check resource limitations
                if decision_results['status'] in ('OUT OF MEMORY', 'TIMEOUT'):
                    if decision_results['status'] == 'OUT OF MEMORY':
                        msg = "memory exhausted"
                    else:
                        msg = "CPU time exhausted"

                    with open(verification_problem_desc, 'w',
                              encoding='utf8') as fp:
                        fp.write(msg)

                    data = list(
                        self.vals['task solution triples'][self.results_key])
                    data[2] = decision_results['status']
                    self.vals['task solution triples'][self.results_key] = data
                else:
                    os.symlink(os.path.relpath(log_file, 'verification'),
                               verification_problem_desc)

                klever.core.utils.report(
                    self.logger, 'unknown', {
                        'parent':
                        "{}/verification".format(self.id),
                        'attrs': [],
                        'problem_description':
                        klever.core.utils.ArchiveFiles(
                            [verification_problem_desc],
                            {verification_problem_desc: 'problem desc.txt'})
                    }, self.mqs['report files'], self.vals['report id'],
                    self.conf['main working directory'], 'verification')

    def process_failed_task(self, task_id):
        """The function has a callback at Job module."""
        self.task_error = self.session.get_task_error(task_id)
        # We do not need task and its files anymore.
        self.session.remove_task(task_id)

        self.verdict = 'non-verifier unknown'

    def process_finished_task(self, task_id, opts, verifier):
        """Function has a callback at Job.py."""
        self.session.download_decision(task_id)

        with zipfile.ZipFile('decision result files.zip') as zfp:
            zfp.extractall()

        with open('decision results.json', encoding='utf8') as fp:
            decision_results = json.load(fp)

        # TODO: specify the computer where the verifier was invoked (this information should be obtained from BenchExec
        # or the VerifierCloud web client).
        log_files_dir = glob.glob(os.path.join('output',
                                               'benchmark*logfiles'))[0]
        log_files = os.listdir(log_files_dir)

        if len(log_files) != 1:
            raise NotImplementedError(
                'Exactly one log file should be output (but "{0}" are given)'
                .format(len(log_files)))

        log_file = os.path.join(log_files_dir, log_files[0])

        # Send an initial report
        report = {
            'identifier': "{}/verification".format(self.id),
            'parent': self.id,
            # TODO: replace with something meaningful, e.g. tool name + tool version + tool configuration.
            'attrs': [],
            'component': verifier,
            'wall_time': decision_results['resources']['wall time'],
            'cpu_time': decision_results['resources']['CPU time'],
            'memory': decision_results['resources']['memory size'],
            'original_sources': self.clade.get_uuid()
        }

        if self.additional_srcs:
            report['additional_sources'] = klever.core.utils.ArchiveFiles([
                os.path.join(self.conf['main working directory'],
                             self.additional_srcs)
            ])

        # Get coverage
        coverage_info_dir = os.path.join('total coverages',
                                         self.conf['sub-job identifier'],
                                         self.req_spec_id.replace('/', '-'))
        os.makedirs(os.path.join(self.conf['main working directory'],
                                 coverage_info_dir),
                    exist_ok=True)

        self.coverage_info_file = os.path.join(
            coverage_info_dir,
            "{0}_coverage_info.json".format(task_id.replace('/', '-')))

        # Update solution progress. It is necessary to update the whole list to sync changes
        data = list(self.vals['task solution triples'][self.results_key])
        data[1] = decision_results['resources']
        self.vals['task solution triples'][self.results_key] = data

        if not self.logger.disabled and log_file:
            report['log'] = klever.core.utils.ArchiveFiles(
                [log_file], {log_file: 'log.txt'})

        if self.conf['upload verifier input files']:
            report['task'] = task_id

        # Remember exception and raise it if verdict is not unknown
        exception = None
        if opts['code coverage details'] != "None":
            try:
                LCOV(
                    self.conf, self.logger,
                    os.path.join('output', 'coverage.info'), self.clade,
                    self.source_paths, self.search_dirs,
                    self.conf['main working directory'],
                    opts['code coverage details'],
                    os.path.join(self.conf['main working directory'],
                                 self.coverage_info_file),
                    os.path.join(self.conf['main working directory'],
                                 coverage_info_dir),
                    self.verification_task_files)
            except Exception as err:
                exception = err
            else:
                report['coverage'] = klever.core.utils.ArchiveFiles(
                    ['coverage'])
                self.vals['coverage_finished'][
                    self.conf['sub-job identifier']] = False

        # todo: This should be checked to guarantee that we can reschedule tasks
        klever.core.utils.report(self.logger, 'verification', report,
                                 self.mqs['report files'],
                                 self.vals['report id'],
                                 self.conf['main working directory'])

        try:
            # Submit a verdict
            self.process_single_verdict(decision_results, opts, log_file)
        finally:
            # Submit a closing report
            klever.core.utils.report(self.logger, 'verification finish',
                                     {'identifier': report['identifier']},
                                     self.mqs['report files'],
                                     self.vals['report id'],
                                     self.conf['main working directory'])

        # Check verdict
        if exception and self.verdict != 'unknown':
            raise exception
        elif exception:
            self.logger.exception('Could not parse coverage')

    def __trim_file_names(self, file_names):
        trimmed_file_names = {}

        for file_name in file_names:
            # Remove storage from file names if files were put there.
            storage_file = klever.core.utils.make_relative_path(
                [self.clade.storage_dir], file_name)
            # The caller expects the returned dictionary to map each file name, so fill it in anyway.
            trimmed_file_names[file_name] = storage_file
            # Try to make paths relative to source paths or standard search directories.
            tmp = klever.core.utils.make_relative_path(self.source_paths,
                                                       storage_file,
                                                       absolutize=True)

            # Append special directory name "source files" when cutting off source file names.
            if tmp != os.path.join(os.path.sep, storage_file):
                trimmed_file_names[file_name] = os.path.join(
                    'source files', tmp)
            else:
                # Like in klever.core.vtg.weaver.Weaver#weave.
                tmp = klever.core.utils.make_relative_path(self.search_dirs,
                                                           storage_file,
                                                           absolutize=True)
                if tmp != os.path.join(os.path.sep, storage_file):
                    if tmp.startswith('specifications'):
                        trimmed_file_names[file_name] = tmp
                    else:
                        trimmed_file_names[file_name] = os.path.join(
                            'generated models', tmp)

        return trimmed_file_names
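
process_single_verdict above extracts the solution status from a BenchExec XML report by scanning the <column> elements of the first <run>. A minimal sketch of that parsing with xml.etree.ElementTree (get_benchexec_status and the sample report are hypothetical, not Klever code):

from xml.etree import ElementTree

def get_benchexec_status(report_text):
    result = ElementTree.fromstring(report_text)
    # BenchExec stores per-run data as <column title="..." value="..."/> elements.
    run = result.findall('run')[0]
    for column in run.iter('column'):
        if column.attrib.get('title') == 'status':
            return column.attrib.get('value')
    raise KeyError('There is no solution status in BenchExec XML report')

report = '<result><run><column title="status" value="false(unreach-call)"/></run></result>'
assert get_benchexec_status(report) == 'false(unreach-call)'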