Example #1
0
def print_problem_result(pbm:NamedTuple,
                         problems_config:ProblemsManager):
    '''
    Print a human-readable result summary for a single problem.

    Logs the problem name, description, formula, verification status,
    parametric region (if any), the expected-result comparison, trace
    output (via print_traces) and, when enabled, the solve time.

    :param pbm: the problem namedtuple to report on
    :param problems_config: manager holding statuses/traces/times for pbm
    :return: tuple (ret_status, traces_results) where ret_status is 1 if
             the actual status contradicts pbm.expected and 0 otherwise,
             and traces_results is whatever print_traces returned ([] if
             no traces were printed)
    '''
    # Unnamed problems produce no report; bail out before querying any
    # status/trace information (previously that work was done first and
    # then thrown away).
    if pbm.name is None:
        return (0, [])

    status = problems_config.get_problem_status(pbm)
    if problems_config.has_problem_trace(pbm):
        traces = problems_config.get_problem_traces(pbm)
    else:
        traces = []
    general_config = problems_config.general_config
    # trace numbering continues after the already-collected traces
    count = len(traces) + 1
    ret_status = 0

    # only report the BMC depth when the result is unknown
    unk_k = "" if status != VerificationStatus.UNK else "\nBMC depth: %s"%pbm.bmc_length
    Logger.log("\n** Problem %s **"%(pbm.name), 0)
    if pbm.description is not None:
        Logger.log("Description: %s"%(pbm.description), 0)
    if pbm.properties is not None:
        Logger.log("Formula: %s"%(pbm.properties), 1)
    Logger.log("Result: %s%s"%(status, unk_k), 0)
    if pbm.verification == VerificationType.PARAMETRIC:
        region = problems_config.get_problem_region(pbm)
        # trivial regions (TRUE/FALSE/None) print directly; otherwise print
        # the disjunction of the region formulae
        if region in [TRUE(),FALSE(),None]:
            Logger.log("Region: %s"%(region), 0)
        else:
            Logger.log("Region:\n - %s"%(" or \n - ".join([x.serialize(threshold=100) for x in region])), 0)
    if (pbm.expected is not None):
        expected = VerificationStatus.convert(pbm.expected)
        Logger.log("Expected: %s"%(expected), 0)
        # reuse the already-converted value instead of converting twice
        correct = VerificationStatus.compare(expected, status)
        if not correct:
            Logger.log("%s != %s <<<---------| ERROR"%(status, expected), 0)
            ret_status = 1

    assert not(general_config.force_expected and (pbm.expected is None))

    prefix = pbm.trace_prefix
    traces_results = []

    if (traces is not None) and (len(traces) > 0):
        # parametric: traces are sample executions unless the property failed
        if (pbm.verification == VerificationType.PARAMETRIC) and (status != VerificationStatus.FALSE):
            traces_results = print_traces("Execution", traces, pbm.name, prefix, count)

        # non-simulation failures produce counterexamples
        if (pbm.verification != VerificationType.SIMULATION) and (status == VerificationStatus.FALSE):
            traces_results = print_traces("Counterexample", traces, pbm.name, prefix, count)

        # successful simulations produce executions
        if (pbm.verification == VerificationType.SIMULATION) and (status == VerificationStatus.TRUE):
            traces_results = print_traces("Execution", traces, pbm.name, prefix, count)

    if general_config.time:
        time = problems_config.get_problem_time(pbm)
        Logger.log("Time: %.2f sec"%(time), 0)

    return (ret_status, traces_results)
Example #2
0
    def _option_handling(self, problems_manager: ProblemsManager) -> ProblemsManager:
        '''
        Do any necessary manual option handling.
        E.g. if some options implicitly set other options, this needs to happen here

        This method should be (carefully) modified whenever a new option is added that is not
        completely independent of other options (e.g. might affect how other options need to be set).

        Mutates problems_manager in place and also returns it for convenience.
        (The return annotation previously claimed None, although the manager
        has always been returned.)
        '''

        general_config = problems_manager.general_config

        # handle case where no properties are given
        # i.e. expecting embedded assertions in the model file
        # command_line is True when no problem file was used (e.g. not argument for --problems)
        command_line = general_config.problems is None
        if command_line and len(problems_manager.problems) == 1:
            pbm = problems_manager.problems[0]
            if pbm.properties is None and \
               pbm.verification not in {VerificationType.EQUIVALENCE, VerificationType.SIMULATION}:
                # use the provided (command line) options as defaults
                problems_manager.set_defaults(pbm)
                # remove the problem
                problems_manager._problems = []
                problems_manager._problems_status = dict()

        ################## synchronizing clock automatically abstracts ###################
        if general_config.synchronize:
            general_config.abstract_clock = True

        # iterate through problems and fix options
        for problem in problems_manager.problems:

            ########################### parametric model checking ############################
            # parametric model checking uses strategy BWD
            # need to set the strategy for interpreting traces correctly
            if problem.verification == VerificationType.PARAMETRIC:
                problem.strategy = VerificationStrategy.BWD

        return problems_manager
Example #3
0
    def get_default_problem_manager(self, **kwargs) -> ProblemsManager:
        '''
        Returns the problem manager with default general options, which can be overriden
        with the keyword arguments.

        See the options.py file for the possible keywords
          where dashes in long option names are replaced by underscores
          (and leading dashes are removed)
          e.g. --trace-prefix is trace_prefix

        Raises RuntimeError for unknown keyword options and ValueError when a
        default value cannot be converted to its declared type.
        '''

        # reject anything that is not a known general option
        unknown_gen_options = kwargs.keys() - self._problem_options[GENERAL]
        if unknown_gen_options:
            # BUGFIX: the two string literals previously concatenated as
            # "...in sectionbut got..." (missing separator)
            raise RuntimeError(
                "Expecting only general options "
                "but got {}.\nGeneral options include:\n"
                "{}".format(unknown_gen_options,
                            '\n\t'.join(self._problem_options[GENERAL])))

        # keyword arguments override the built-in defaults
        general_options = dict()
        for option in self._problem_options[GENERAL]:
            if option in kwargs:
                general_options[option] = kwargs[option]
            else:
                general_options[option] = self._defaults[option]

        problem_defaults = {
            o: self._defaults[o]
            for o in self._problem_options[PROBLEM]
        }
        # convert defaults to expected type
        for k, v in problem_defaults.items():
            if v is not None:
                assert k in self._types, "Expecting to have (at least default) type info for every option"
                try:
                    problem_defaults[k] = self._types[k](v)
                except ValueError as e:
                    # chain the original error for easier debugging
                    raise ValueError(
                        "Cannot convert '{}' to expected type {}".format(
                            v, self._types[k])) from e

        return ProblemsManager(Path("./"), general_options, problem_defaults)
Example #4
0
    def read_problem_file(self,
                          config_file: str,
                          _command_line_args: Dict[str, str] = dict(),
                          **kwargs) -> ProblemsManager:
        '''
        Reads a problem file and then overrides defaults with command line options
        if any were provided.

        Users should not pass _command_line_args directly, that is for internal use only.
        Instead, pass options through keyword arguments.

        Priority order (highest first):
          command line > problem option > problem defaults > defaults

        Raises RuntimeError for unknown options and ValueError for values
        that cannot be converted to their declared type.
        '''
        # BUGFIX (mutable default argument): the default value above is a
        # single shared dict and this method writes into it below, which
        # leaked option state across calls and mutated the caller's dict.
        # Work on a copy instead.
        _command_line_args = dict(_command_line_args)

        def _convert(k, v):
            # Convert a raw option value to its declared type.
            # bool needs special-casing because bool('False') is True.
            assert k in self._types, "Expecting to have (at least default) type info for every option"
            if self._types[k] == bool and isinstance(v, str):
                if v == 'True':
                    return True
                elif v == 'False':
                    return False
                else:
                    raise RuntimeError(
                        "Expecting True or False as an option for {} but got {}"
                        .format(k, v))
            try:
                return self._types[k](v)
            except ValueError as e:
                raise ValueError(
                    "Cannot convert '{}' to expected type {}".format(
                        v, self._types[k])) from e

        config_filepath = Path(config_file)
        config_args = self.parse_config(config_filepath)
        general_options = dict(config_args[GENERAL])

        # populate command line arguments with keyword arguments if provided
        if kwargs:
            # check that all options are valid
            unknown_kwargs = (kwargs.keys() - self._problem_options[GENERAL]) - \
                              self._problem_options[PROBLEM]

            if unknown_kwargs:
                raise RuntimeError(
                    "Expected only valid CoSA options as "
                    "keyword arguments but got {}.\nPlease select "
                    "from:\n\t{}\n\nValid options can be also be viewed "
                    "with --help".format(
                        unknown_kwargs, '\n\t'.join(
                            sorted(
                                itertools.chain(
                                    self._problem_options[GENERAL],
                                    self._problem_options[PROBLEM])))))

            # command line arguments should contain everything or nothing
            # populate with none if we need to override with keyword arguments
            if not _command_line_args:
                for option in itertools.chain(self._problem_options[GENERAL],
                                              self._problem_options[PROBLEM]):
                    _command_line_args[option] = None
            for option, v in kwargs.items():
                _command_line_args[option] = v

        # remove default options
        # -- configparser automatically populates defaults
        #    in every section, which we don't want
        for option in config_args[DEFAULT]:
            general_options.pop(option, None)

        unknown_gen_options = general_options.keys(
        ) - self._problem_options[GENERAL]
        if unknown_gen_options:
            raise RuntimeError("Expecting only general options in section"
                               " [GENERAL] but got {} in {}".format(
                                   unknown_gen_options, config_file))

        # populate with general defaults
        # as an optimization, don't even check _command_line_args if it's empty
        if _command_line_args:
            for option in self._problem_options[GENERAL]:
                if option not in general_options or general_options[
                        option] is None:
                    if _command_line_args[option] is not None:
                        general_options[option] = _command_line_args[option]
                    else:
                        general_options[option] = self._defaults[option]
        else:
            for option in self._problem_options[GENERAL]:
                if option not in general_options or general_options[
                        option] is None:
                    general_options[option] = self._defaults[option]

        problem_defaults = {
            o: self._defaults[o]
            for o in self._problem_options[PROBLEM]
        }

        default_options = dict(config_args[DEFAULT])
        unknown_default_options = default_options.keys(
        ) - self._problem_options[PROBLEM]
        if unknown_default_options:
            raise RuntimeError("Expecting only problem options in section"
                               " [DEFAULT] but got {} in {}".format(
                                   unknown_default_options, config_file))
        for option, value in default_options.items():
            # override the defaults with problem defaults
            problem_defaults[option] = value

        # convert defaults to expected type
        # NOTE: unlike before, bool-valued defaults given as 'True'/'False'
        # strings are now converted correctly (bool(str) was always True)
        for k, v in problem_defaults.items():
            if v is not None:
                problem_defaults[k] = _convert(k, v)

        # convert options to expected type
        for k, v in general_options.items():
            if v is not None:
                general_options[k] = _convert(k, v)

        # Generate the problems_manager and populate it
        problems_manager = ProblemsManager(config_filepath.parent,
                                           general_options, problem_defaults)

        # Recall priority order
        # command line > problem option > problem defaults > defaults
        for section in config_args:
            if section == DEFAULT or section == GENERAL:
                continue
            problem_file_options = dict(config_args[section])
            unknown_problem_file_options = problem_file_options.keys(
            ) - self._problem_options[PROBLEM]
            if unknown_problem_file_options:
                raise RuntimeError(
                    "Expecting only problem options "
                    "in problem section but got {} in {}".format(
                        unknown_problem_file_options, config_file))

            # The [HEADER] style sections become problem names
            problem_file_options['name'] = section

            if _command_line_args:
                for arg in self._problem_options[PROBLEM]:
                    if _command_line_args[arg] is not None:
                        # overwrite config file with command line arguments
                        problem_file_options[arg] = _command_line_args[arg]
                    # if the option has still not been set, find a default
                    # problem defaults were already given priority
                    if arg not in problem_file_options:
                        problem_file_options[arg] = problem_defaults[arg]
            else:
                # set defaults if not explicitly set in this particular problem
                for arg in self._problem_options[PROBLEM]:
                    if arg not in problem_file_options:
                        problem_file_options[arg] = problem_defaults[arg]

            # convert problem options to expected type
            for k, v in problem_file_options.items():
                if v is not None:
                    problem_file_options[k] = _convert(k, v)

            try:
                # using frozen=False keeps the problems mutable for now
                problems_manager.add_problem(**problem_file_options,
                                             frozen=False)
            except TypeError as e:
                # BUGFIX: guard against exceptions with empty args --
                # 'message' was previously read unconditionally and could be
                # unbound here
                message = e.args[0] if e.args else ''
                if "unexpected keyword argument" in message:
                    unknown_option = message[message.find("argument ") + 9:]
                    raise RuntimeError(
                        "Unknown option in problem file: {}".format(
                            unknown_option)) from e
                else:
                    raise
        return problems_manager
Example #5
0
    def parse_args(self) -> ProblemsManager:
        '''
        Parse command line arguments into a populated ProblemsManager.

        If a configuration (problem) file was supplied, it is read and the
        command line arguments act as overrides; otherwise a single problem
        is built purely from the command line options / defaults.
        '''
        command_line_args = vars(super().parse_args())

        # collect any configuration files supplied on the command line
        config_files = []
        for config_file in self._config_files:
            if command_line_args[config_file] is not None:
                config_files.append(command_line_args[config_file])
        if config_files:
            assert len(config_files
                       ) == 1, "Expecting only a single configuration file"
            # BUGFIX: use the collected file path. This previously read
            # command_line_args[config_file], where config_file was the
            # leftover loop variable -- i.e. the *last* option name checked,
            # not necessarily the one that was actually set.
            problems_manager = self.read_problem_file(
                config_files[0],
                _command_line_args=command_line_args)
        else:
            # get general options
            general_options = dict()
            for option in self._problem_options[GENERAL]:
                if command_line_args[option] is not None:
                    general_options[option] = command_line_args[option]
                else:
                    general_options[option] = self._defaults[option]

            # convert options to expected type
            for k, v in general_options.items():
                if v is not None:
                    assert k in self._types, "Expecting to have (at least default) type info for every option"
                    try:
                        general_options[k] = self._types[k](v)
                    except ValueError as e:
                        raise ValueError(
                            "Cannot convert '{}' to expected type {}".format(
                                v, self._types[k])) from e

            # create default options for only problem fields
            problem_defaults = {
                o: self._defaults[o]
                for o in self._problem_options[PROBLEM]
            }

            # convert defaults to expected type
            for k, v in problem_defaults.items():
                if v is not None:
                    assert k in self._types, "Expecting to have (at least default) type info for every option"
                    try:
                        problem_defaults[k] = self._types[k](v)
                    except ValueError as e:
                        raise ValueError(
                            "Cannot convert '{}' to expected type {}".format(
                                v, self._types[k])) from e

            problems_manager = ProblemsManager(Path("./"), general_options,
                                               problem_defaults)

            # generate a single problem from the command line options
            single_problem_options = dict()
            for option in self._problem_options[PROBLEM]:
                if command_line_args[option] is not None:
                    single_problem_options[option] = command_line_args[option]
                else:
                    single_problem_options[option] = self._defaults[option]

            for k, v in single_problem_options.items():
                if v is not None:
                    assert k in self._types, "Expecting to have (at least default) type info for every option"
                    try:
                        single_problem_options[k] = self._types[k](v)
                    except ValueError as e:
                        raise ValueError(
                            "Cannot convert '{}' to expected type {}".format(
                                v, self._types[k])) from e

            # calling with frozen=False keeps the problem mutable for now
            # (option handling below might still need to override options)
            problems_manager.add_problem(**single_problem_options,
                                         frozen=False)

        # run any manual option handling
        # modifies the problems_manager in-place
        self._option_handling(problems_manager)
        # freeze the problems
        # now all existing problems are (immutable) namedtuples
        # note, you can still add new problems, but they must be frozen (e.g. immutable)
        problems_manager.freeze()

        return problems_manager
Example #6
0
    def solve_problems(self, problems_config: ProblemsManager) -> None:
        '''
        Parse the model(s), apply model-level passes (init override,
        default initial values, embedded assertions) and solve every
        problem in problems_config, storing statuses, traces, regions and
        times back into problems_config in place.
        '''

        general_config = problems_config.general_config
        model_extension = general_config.model_extension
        assume_if_true = general_config.assume_if_true

        self.sparser = StringParser(general_config)
        self.lparser = LTLParser()

        self.coi = ConeOfInfluence()

        # optional model modifier applied while parsing (only set when
        # --model-extension was given)
        modifier = None
        if general_config.model_extension is not None:
            modifier = lambda hts: ModelExtension.extend(
                hts,
                ModelModifiersFactory.modifier_by_name(general_config.
                                                       model_extension))

        # generate main system system
        hts, invar_props, ltl_props = self.parse_model(
            general_config.model_files, problems_config.relative_path,
            general_config, "System 1", modifier)

        # Generate second models if any are necessary
        for problem in problems_config.problems:
            if problem.verification == VerificationType.EQUIVALENCE:
                if problem.equal_to is None:
                    raise RuntimeError(
                        "No second model for equivalence "
                        "checking provided for problem {}".format(
                            problem.name))

                hts2, _, _ = self.parse_model(problem.equal_to,
                                              problems_config.relative_path,
                                              general_config, "System 2",
                                              modifier)
                problems_config.add_second_model(problem, hts2)

        # TODO : contain these types of passes in functions
        #        they should be registered as passes

        # override initial states with the ones from the init file, if given
        if general_config.init is not None:
            iparser = InitParser()
            init_hts, inv_a, ltl_a = iparser.parse_file(
                general_config.init, general_config)
            assert inv_a is None and ltl_a is None, "Not expecting assertions from init state file"

            # remove old inits
            for ts in hts.tss:
                ts.init = TRUE()

            hts.combine(init_hts)
            hts.single_init(rebuild=True)

        # set default bit-wise initial values (0 or 1)
        if general_config.default_initial_value is not None:
            def_init_val = int(general_config.default_initial_value)
            try:
                if int(def_init_val) not in {0, 1}:
                    raise RuntimeError
            except:
                # NOTE(review): bare except -- it also swallows
                # KeyboardInterrupt; catching (ValueError, RuntimeError)
                # would be cleaner. Also, def_init_val is already an int,
                # so the extra int() calls here and below are redundant.
                raise RuntimeError(
                    "Expecting 0 or 1 for default_initial_value,"
                    "but received {}".format(def_init_val))
            def_init_ts = TS("Default initial values")
            new_init = []
            initialized_vars = get_free_variables(hts.single_init())
            state_vars = hts.state_vars
            num_def_init_vars = 0
            num_state_vars = len(state_vars)

            const_arr_supported = True

            if hts.logic == L_ABV:
                for p in problems_config.problems:
                    if p.solver_name not in CONST_ARRAYS_SUPPORT:
                        const_arr_supported = False
                        # NOTE(review): this reads problem.solver_name, but
                        # 'problem' is the leftover variable from the
                        # equivalence loop above -- presumably p.solver_name
                        # was intended; confirm before relying on this
                        # warning text.
                        Logger.warning(
                            "Using default_initial_value with arrays, "
                            "but one of the selected solvers, "
                            "{} does not support constant arrays. "
                            "Any assumptions on initial array values will "
                            "have to be done manually".format(
                                problem.solver_name))
                        break

            # give a default value to every state variable that is not
            # already constrained by the initial state
            for sv in state_vars - initialized_vars:
                if sv.get_type().is_bv_type():
                    width = sv.get_type().width
                    if int(def_init_val) == 1:
                        val = BV((2**width) - 1, width)
                    else:
                        val = BV(0, width)

                    num_def_init_vars += 1
                elif sv.get_type().is_array_type() and \
                     sv.get_type().elem_type.is_bv_type() and \
                     const_arr_supported:
                    svtype = sv.get_type()
                    width = svtype.elem_type.width
                    if int(def_init_val) == 1:
                        val = BV((2**width) - 1, width)
                    else:
                        val = BV(0, width)
                    # create a constant array with a default value
                    val = Array(svtype.index_type, val)
                else:
                    continue

                def_init_ts.add_state_var(sv)
                new_init.append(EqualsOrIff(sv, val))
            def_init_ts.set_behavior(simplify(And(new_init)), TRUE(), TRUE())
            hts.add_ts(def_init_ts)
            # NOTE(review): the message always says "to zero" even when
            # default_initial_value == 1 sets all-ones, and array variables
            # are not counted in num_def_init_vars.
            Logger.msg(
                "Set {}/{} state elements to zero "
                "in initial state\n".format(num_def_init_vars, num_state_vars),
                1)

        problems_config.hts = hts

        # TODO: Update this so that we can control whether embedded assertions are solved automatically
        if not general_config.skip_embedded:
            for invar_prop in invar_props:
                problems_config.add_problem(
                    verification=VerificationType.SAFETY,
                    name=invar_prop[0],
                    description=invar_prop[1],
                    properties=invar_prop[2])
                self.properties.append(invar_prop[2])
            for ltl_prop in ltl_props:
                # NOTE(review): this loop reads invar_prop[0..2] -- the
                # leftover variable from the loop above -- for name,
                # description and properties, while appending ltl_prop[2]
                # to self.properties. This looks like a copy-paste bug;
                # presumably ltl_prop[0..2] was intended. Confirm before
                # changing, as it alters which properties are registered.
                problems_config.add_problem(verification=VerificationType.LTL,
                                            name=invar_prop[0],
                                            description=invar_prop[1],
                                            properties=invar_prop[2])
                self.properties.append(ltl_prop[2])

        Logger.log(
            "Solving with abstract_clock=%s, add_clock=%s" %
            (general_config.abstract_clock, general_config.add_clock), 2)

        # ensure the miter_out variable exists
        miter_out = None

        for problem in problems_config.problems:
            if problem.name is not None:
                Logger.log(
                    "\n*** Analyzing problem \"%s\" ***" % (problem.name), 1)
                Logger.msg("Solving \"%s\" " % problem.name, 0,
                           not (Logger.level(1)))

            # apply parametric behaviors (such as toggling the clock)
            # Note: This is supposed to be *before* creating the combined system for equivalence checking
            #       we want this assumption to be applied to both copies of the clock
            problem_hts = ParametricBehavior.apply_to_problem(
                problems_config.hts, problem, general_config, self.model_info)

            if problem.verification == VerificationType.EQUIVALENCE:
                hts2 = problems_config.get_second_model(problem)
                problem_hts, miter_out = Miter.combine_systems(
                    hts, hts2, problem.bmc_length,
                    general_config.symbolic_init, problem.properties, True)

            try:
                # convert the formulas to PySMT FNodes
                # lemmas, assumptions and precondition always use the regular parser
                lemmas, assumptions, precondition = self.convert_formulae(
                    [
                        problem.lemmas, problem.assumptions,
                        problem.precondition
                    ],
                    parser=self.sparser,
                    relative_path=problems_config.relative_path)

                # LTL properties need the LTL parser; everything else uses
                # the regular string parser
                if problem.verification != VerificationType.LTL:
                    parser = self.sparser
                else:
                    parser = self.lparser

                prop = None
                if problem.properties is not None:
                    prop = self.convert_formula(
                        problem.properties,
                        relative_path=problems_config.relative_path,
                        parser=parser)
                    assert len(prop) == 1, "Properties should already have been split into " \
                        "multiple problems but found {} properties here".format(len(prop))
                    prop = prop[0]
                    self.properties.append(prop)
                else:
                    # no property: simulation just needs TRUE; equivalence
                    # gets its property from the miter below; anything else
                    # is an error
                    if problem.verification == VerificationType.SIMULATION:
                        prop = TRUE()
                    elif (problem.verification
                          is not None) and (problem.verification !=
                                            VerificationType.EQUIVALENCE):
                        Logger.error(
                            "Property not provided for problem {}".format(
                                problem.name))

                if problem.verification == VerificationType.EQUIVALENCE:
                    assert miter_out is not None
                    # set property to be the miter output
                    # if user provided a different equivalence property, this has already
                    # been incorporated in the miter_out
                    prop = miter_out
                    # reset the miter output
                    miter_out = None

                if precondition:
                    assert len(precondition
                               ) == 1, "There should only be one precondition"
                    prop = Implies(precondition[0], prop)

                # TODO: keep assumptions separate from the hts
                # IMPORTANT: CLEAR ANY PREVIOUS ASSUMPTIONS AND LEMMAS
                #   This was previously done in __solve_problem and has been moved here
                #   during the frontend refactor in April 2019
                # this is necessary because the problem hts is just a reference to the
                #   overall (shared) HTS
                problem_hts.assumptions = None
                problem_hts.lemmas = None

                # Compute the Cone Of Influence
                # Returns a *new* hts (not pointing to the original one anymore)
                # NOTE(review): this rebinds the shared 'hts' inside the
                # problem loop, so later problems see the COI-reduced
                # system -- confirm that is intended.
                if problem.coi:
                    if Logger.level(2):
                        timer = Logger.start_timer("COI")
                    hts = self.coi.compute(hts, prop)
                    if Logger.level(2):
                        Logger.get_timer(timer)

                if general_config.time:
                    timer_solve = Logger.start_timer(
                        "Problem %s" % problem.name, False)

                status, trace, traces, region = self.__solve_problem(
                    problem_hts, prop, lemmas, assumptions, problem)

                # set status for this problem
                problems_config.set_problem_status(problem, status)

                # TODO: Determine whether we need both trace and traces
                assert trace is None or traces is None, "Expecting either a trace or a list of traces"
                if trace is not None:
                    problem_traces = self.__process_trace(
                        hts, trace, general_config, problem)
                    problems_config.set_problem_traces(problem, problem_traces)

                if traces is not None:
                    traces_to_add = []
                    for trace in traces:
                        problem_trace = self.__process_trace(
                            hts, trace, general_config, problem)
                        for pt in problem_trace:
                            traces_to_add.append(pt)
                    problems_config.set_problem_traces(problem, traces_to_add)

                if problem.verification == VerificationType.PARAMETRIC:
                    assert region is not None
                    problems_config.set_problem_region(problem, region)

                if status is not None:
                    Logger.msg(" %s\n" % status, 0, not (Logger.level(1)))

                # a proven safety property (with no user assumptions) can be
                # reused as an assumption for the remaining problems
                if (assume_if_true) and \
                   (status == VerificationStatus.TRUE) and \
                   (problem.assumptions == None) and \
                   (problem.verification == VerificationType.SAFETY):

                    # TODO: relax the assumption on problem.assumptions
                    #       can still add it, just need to make it an implication

                    ass_ts = TS("Previous assumption from property")
                    if TS.has_next(prop):
                        ass_ts.trans = prop
                    else:
                        ass_ts.invar = prop
                    # add assumptions to main system
                    problem_hts.reset_formulae()
                    problem_hts.add_ts(ass_ts)

                if general_config.time:
                    problems_config.set_problem_time(
                        problem, Logger.get_timer(timer_solve, False))

            except KeyboardInterrupt as e:
                Logger.msg("\b\b Skipped!\n", 0)