Example #1
    def encode_run_graph(self, states_to_encode):
        state_to_rejecting_scc = build_state_to_rejecting_scc(self.automaton)

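        # for every automaton node q and every state m in states_to_encode,
        # encode the constraints induced by each labelled transition of q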
        for q in self.automaton.nodes:
            for m in states_to_encode:
                for label in q.transitions:
                    self._encode_transitions(q, m, label,
                                             state_to_rejecting_scc)

            self.solver.comment('encoded spec state ' + smt_name_spec(q, TYPE_A_STATE))
Example #2
def is_safety_automaton(ucw_automaton):
    # TODO: are there better ways to identify safety props than checking corresponding UCW?
    from helpers.rejecting_states_finder import build_state_to_rejecting_scc  # TODO: bad circular dependence

    # ltl3ba creates transitional rejecting nodes, so filter them
    node_to_rej_scc = build_state_to_rejecting_scc(ucw_automaton)

    for node in ucw_automaton.rejecting_nodes:  # TODO: does not work with rejecting edges automaton
        if node not in node_to_rej_scc:  # skip transitional rejecting nodes
            continue

        assert self_looped(node) or len(node_to_rej_scc[node]) > 1  # TODO: debug purposes

        if not is_absorbing(node):
            return False

    return True
Example #3
def is_safety_automaton(automaton):
    """
    This function is sound (a 'yes' answer is correct),
    but incomplete (it may answer 'no' even though the automaton is a safety automaton).
    In safety automata, the only accepting nodes allowed are absorbing nodes (dead ends).
    """
    # TODO: are there better ways to identify safety properties than checking the corresponding automaton?
    from helpers.rejecting_states_finder import build_state_to_rejecting_scc  # assumed import path, mirroring the UCW variant

    # ltl3ba creates transitional rejecting nodes, so filter them
    node_to_rej_scc = build_state_to_rejecting_scc(automaton)

    for node in automaton.acc_nodes:  # TODO: does not work with rejecting edges automaton
        if node not in node_to_rej_scc:
            continue

        assert _is_self_looped(node) or len(node_to_rej_scc[node]) > 1  # TODO: debug purposes

        if not is_absorbing(node):
            return False

    return True
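The absorbing-node test used above is not shown in these snippets. Below is a minimal sketch of what it could look like; the transition shape (a label mapped to a collection whose first element lists `(target, is_rejecting)` pairs) is an assumption borrowed from the `encode_automaton` example further down, so the real helper may differ.

def is_absorbing(node):
    # a node is absorbing (a dead end) iff every outgoing transition loops back to itself
    for _label, target_infos in node.transitions.items():
        for target, _is_rejecting in target_infos[0]:
            if target is not node:
                return False  # some transition leaves the node, so it is not a dead end
    return True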
Example #4
    def encode_automaton(self, automaton, automaton_index,
                         is_architecture_specific, cutoff, global_cutoff):
        '''
        Encodes the given automaton

        :param automaton: Automaton instance
        :param automaton_index: Automaton index used for identifying the lambda
                                function corresponding to the automaton
        :param is_architecture_specific: Whether the automaton is required
                                         by the architecture in use
        :param cutoff: Cut-off associated with the current automaton
        :param global_cutoff: Maximum of all automata-specific cut-offs
        '''
        # declare UCT state
        uct_state = Datatype('Q_%d' % automaton_index)
        state_prefix = 'q%d_' % automaton_index

        nodes_list = list(automaton.nodes)
        for node in nodes_list:
            uct_state.declare(state_prefix + node.name)
        uct_state = uct_state.create()
        uct_states_dict = {nodes_list[i].name:
                           getattr(uct_state, uct_state.constructor(i).name())
                           for i in range(len(nodes_list))}

        # declare lambda functions
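        # lambda_b annotates which combined (automaton state, global system state)
        # tuples are reachable in the run graph; lambda_s is the accompanying
        # ranking annotation that may not decrease along transitions and must
        # strictly increase whenever the target automaton state is rejecting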
        lambda_b_function_argument_sorts = \
            [uct_state] + \
            self.get_fresh_global_state_sorts(cutoff=cutoff) + \
            [BoolSort()]

        lambda_b_function = Function('lambda_b_%d' % (automaton_index),
                                     lambda_b_function_argument_sorts)

        lambda_s_function_argument_sorts = \
            [uct_state] + \
            self.get_fresh_global_state_sorts(cutoff=cutoff) + \
            [IntSort()]

        lambda_s_function = Function('lambda_s_%d' % (automaton_index),
                                     lambda_s_function_argument_sorts)

        # avoid global deadlocks in case of the fairness property
        if is_architecture_specific:
            self._avoid_deadlocks(uct_state, lambda_b_function,
                                  cutoff, global_cutoff)

        assert(len(automaton.initial_sets_list) == 1)
        initial_uct_states = [uct_states_dict[node.name]
                              for node in automaton.initial_sets_list[0]]
        initial_system_states = self._get_initial_system_states(cutoff=cutoff)

        # list of tuples in format (q0, (t1, t2, ...))
        initial_state_tuples = product(*[initial_uct_states,
                                         initial_system_states])
        # merge tuples
        initial_state_tuples = [tuple([item[0]] + list(item[1]))
                                for item in initial_state_tuples]

        logging.debug("Automaton %d   Initial states: %s",
                      automaton_index, initial_state_tuples)

        for initial_uct_state in initial_state_tuples:
            self.encoder_info.solver.add(lambda_b_function(initial_uct_state))
            self.encoder_info.solver.add(lambda_s_function(initial_uct_state) == 0)

        # (template function, instance index)
        template_instance_index_tuples = \
            self._get_templates_instance_index_tuples(cutoff=cutoff)

        # scheduling variable assignments under which the particular processes
        # are scheduled: (k, i) -> list of scheduling variable values
        schedule_values_dict = dict(self.spec.get_schedule_values())
        scheduling_signals = self.spec.get_scheduling_signals()

        # used for SCC lambda_s optimization
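        # one separate ranking function per rejecting SCC, over a bit-vector of
        # width len(scc): the ranking only needs to be compared when the source
        # and target state lie in the same rejecting SCC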
        sccs = build_state_to_rejecting_scc(automaton)
        scc_lambda_functions = \
            {scc: Function('lambda_s_%d_%d' % (automaton_index, scc_index),
                           [uct_state] +
                           self.get_fresh_global_state_sorts(cutoff=cutoff) +
                           [BitVecSort(len(scc))])
             for scc_index, scc in enumerate(sccs.values())}

        spec_cutoff_process_indices = \
            self.get_process_indices(cutoff=self.spec.cutoff)

        global_state_tuples = \
            self.get_fresh_global_state_variables(cutoff=cutoff,
                                                  prefix="curr",
                                                  include_indices=True)

        global_state_dict = dict(global_state_tuples)

        current_global_state = [state_variable for _, state_variable
                                in global_state_tuples]

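        # (template_index, instance_index) -> input signals of that instance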
        input_signals_set = {(t[0].template_index, t[1]):
                             t[0].get_input_signals(t[1])
                             for t in template_instance_index_tuples}

        input_signals_list = [sig for t in template_instance_index_tuples
                              for sig in t[0].get_input_signals(t[1])]

        input_signal_expr_dict = {sig: Bool(str(sig))
                                  for sig in input_signals_list}

        # dictionary of output signals -> function call
        output_signal_expr_dict = \
            {signal_name:
             signal_function(global_state_dict[(template_function.template_index,
                                                instance_index)])
             for template_function, instance_index in template_instance_index_tuples
             for signal_name, signal_function in
             template_function.get_output_signals_function_dict(instance_index).items()}

        function_placeholder_signals_set = \
             set(self.architecture.get_placeholder_signals(cutoff))

        # flatten the automaton into (source node, transition label,
        # (target node, is_rejecting)) triples
        transitions = [(src_node, transition, target_node_info)
                       for src_node in automaton.nodes
                       for transition, target_node_infos
                       in src_node.transitions.items()
                       for target_node_info in target_node_infos[0]]
        for src_node, transition, target_node_info in transitions:

            target_node, is_rejecting_target_node = target_node_info

            logging.debug("Automaton: %d: %s->%s, condition: %s",
                          automaton_index, src_node.name, target_node.name,
                          transition)

            for templ_func, i in template_instance_index_tuples:
                # we use k for the template index and i for the instance index
                # as defined in the paper
                k = templ_func.template_index

                others_global_state_tuples = \
                    [(k_i, state_variable)
                     for k_i, state_variable in
                     filter(lambda k_i_state_tuple: k_i_state_tuple[0] !=
                            (k, i),
                            global_state_tuples)]

                others_global_state = [state for _, state
                                       in others_global_state_tuples]

                current_local_state = global_state_dict[(k, i)]

                next_local_state = Const('t_next_%d_%d' % (k, i),
                                         templ_func.state_sort)
                next_global_state = [state
                                     if k_i != (k, i)
                                     else next_local_state
                                     for k_i, state in global_state_tuples]

                # get scheduling assignment if current instance is scheduled
                sched_assignment = schedule_values_dict[(k, i)]
                sched_assignment_dict = \
                    {scheduling_signals[idx]: sched_assignment[idx]
                     for idx in range(len(scheduling_signals))}

                logging.debug("\tinstance: (%d, %d) sched=%s",
                              k, i, sched_assignment)

                # parameters for functions
                guard_set_call_expr = \
                    templ_func.guard_set(
                        self._blowup_state_set(others_global_state_tuples,
                                               spec_cutoff_process_indices,
                                               (k, i)))

                # only add constraint if scheduling assignment
                # matches the label
                if not self._compare_scheduling(sched_assignment_dict,
                                                transition):
                    logging.debug("\tSKIP %s->%s, condition: %s, scheduling=%s",
                                  src_node.name, target_node.name,
                                  transition, sched_assignment)
                    continue

                transition_keys = set(transition.keys())
                used_input_signals = transition_keys.intersection(set(input_signals_list))
                used_output_signals = transition_keys.intersection(output_signal_expr_dict.keys())
                used_placeholder_signals = transition_keys.intersection(function_placeholder_signals_set)
                used_scheduler_signals = transition_keys.intersection(set(scheduling_signals))

                assert (len(used_input_signals) +
                        len(used_output_signals) +
                        len(used_placeholder_signals) +
                        len(used_scheduler_signals) == len(transition))

                condition = []

                for input_signal in used_input_signals:
                    condition.append(input_signal_expr_dict[input_signal] ==
                                     transition[input_signal])

                for output_signal in used_output_signals:
                    condition.append(output_signal_expr_dict[output_signal] ==
                                     transition[output_signal])

                for placeholder_signal in used_placeholder_signals:
                    ph_instance = (placeholder_signal.template_index,
                                   placeholder_signal.instance_index)
                    ph_relative_global_state_tuples = \
                        list(filter(lambda x: x[0] !=
                                    ph_instance, global_state_tuples))
                    ph_template_func = self.encoder_info.template_functions[
                        placeholder_signal.template_index]

                    ph_gs = ph_template_func.guard_set(
                        self._blowup_state_set(ph_relative_global_state_tuples,
                                               spec_cutoff_process_indices,
                                               ph_instance))

                    ph_relative_current_local_state = global_state_dict[ph_instance]
                    ph_relative_current_inputs = \
                        [input_signal_expr_dict[sig]
                         for sig in input_signals_set[ph_instance]]

                    if placeholder_signal.name.startswith('enabled'):
                        condition.append(ph_template_func.is_any_enabled(
                            [ph_relative_current_local_state] + \
                            ph_relative_current_inputs + \
                            [ph_gs]) == transition[placeholder_signal])
                    elif placeholder_signal.name.startswith('active'):
                        condition.append(self.encoder_info.is_scheduled(
                            [placeholder_signal.template_index,
                             placeholder_signal.instance_index] + \
                            sched_assignment) == transition[placeholder_signal])
                    elif placeholder_signal.name.startswith('init'):
                        req_initial_states = ph_template_func.get_initial_states()
                        assert(len(req_initial_states) == 1)
                        condition.append(
                            (ph_relative_current_local_state == req_initial_states[0]) ==
                            transition[placeholder_signal])
                    else:
                        raise Exception(placeholder_signal.name)

                condition_expression = True
                if len(condition) > 0:
                    condition_expression = And(*condition)

                current_local_input_arguments = [input_signal_expr_dict[sig]
                                                 for sig in input_signals_set[(k, i)]]
                input_arguments = [input_signal_expr_dict[signal]
                                   for signal in input_signals_list]
                forall_arguments = \
                    [current_local_state, next_local_state] + \
                    others_global_state + \
                    input_arguments

                current_combined_state_parameters = \
                    [uct_states_dict[src_node.name]] + \
                    current_global_state
                next_combined_state_parameters = \
                    [uct_states_dict[target_node.name]] + \
                    next_global_state

                delta_enabled_function_parameters = [current_local_state] + \
                                                    current_local_input_arguments + \
                                                    [next_local_state] + \
                                                    [guard_set_call_expr]

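                # build the ranking requirement for this transition: the
                # annotation must strictly increase into rejecting targets and
                # may not decrease otherwise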
                lambda_s_req_expr = None
                if self._encoding_optimization & EncodingOptimization.LAMBDA_SCC:
                    logging.debug("Use LAMBDA_SCC optimization")
                    lambda_s_req_expr = True
                    current_scc = sccs.get(src_node)
                    if current_scc is not None and current_scc == sccs.get(target_node):
                        scc_ls_func = scc_lambda_functions[current_scc]
                        # strict increase (UGT) into a rejecting target state,
                        # non-strict (UGE) otherwise
                        compare = UGT if is_rejecting_target_node else UGE
                        lambda_s_req_expr = \
                            compare(scc_ls_func(next_combined_state_parameters),
                                    scc_ls_func(current_combined_state_parameters))
                else:
                    # using no lambda_s optimizations
                    if is_rejecting_target_node:
                        lambda_s_req_expr = (
                            lambda_s_function(next_combined_state_parameters) >
                            lambda_s_function(current_combined_state_parameters))
                    else:
                        lambda_s_req_expr = (
                            lambda_s_function(next_combined_state_parameters) >=
                            lambda_s_function(current_combined_state_parameters))

                extended_condition_expr = \
                    templ_func.delta_enabled_functions[i](
                        delta_enabled_function_parameters)

                extended_condition_expr = \
                    Or(extended_condition_expr,
                       And(current_local_state == next_local_state,
                           Not(templ_func.is_any_enabled(
                               [current_local_state] + \
                               current_local_input_arguments + \
                               [guard_set_call_expr]))))

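                # if the source combined state is annotated, the transition
                # label matches, and the system makes a consistent step, then
                # the target combined state must be annotated as well and
                # satisfy the ranking requirement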
                expr = ForAll(forall_arguments, Implies(
                    And(lambda_b_function(current_combined_state_parameters),
                        condition_expression,
                        extended_condition_expr),
                    And(lambda_b_function(next_combined_state_parameters),
                        lambda_s_req_expr)))

                logging.debug("\tADD  %s->%s, condition: %s, scheduling=%s",
                              src_node.name, target_node.name,
                              transition, sched_assignment)

                self.encoder_info.solver.add(expr)
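For orientation, a hypothetical call site for this method might look as follows. The encoder object, the list of automata with their cut-offs, and all concrete values are placeholders and not taken from the surrounding code; only the parameter order and the meaning of global_cutoff follow the docstring above.

# hypothetical usage sketch -- 'encoder' and 'automata_with_cutoffs' are assumed names
automata_with_cutoffs = [(automaton_a, 2), (automaton_b, 3)]            # assumed inputs
global_cutoff = max(cutoff for _, cutoff in automata_with_cutoffs)      # maximum of all cut-offs

for index, (automaton, cutoff) in enumerate(automata_with_cutoffs):
    encoder.encode_automaton(automaton, index,
                             is_architecture_specific=False,
                             cutoff=cutoff,
                             global_cutoff=global_cutoff)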