Beispiel #1
0
def _parse_part(acacia_part_text: str):
    """Split an Acacia .part description into (input_signals, output_signals)."""
    stripped = [ln.strip() for ln in acacia_part_text.split('\n') if ln.strip()]

    # keep only the declaration lines of each kind
    in_decls = lfilter(lambda ln: ln.startswith('.inputs'), stripped)
    out_decls = lfilter(lambda ln: ln.startswith('.outputs'), stripped)

    return (_parse_signals_from_lines(in_decls),
            _parse_signals_from_lines(out_decls))
Beispiel #2
0
def _parse_part(acacia_part_text: str):
    """Extract (input_signals, output_signals) from an Acacia part text."""
    # NOTE: naive line-based parser; the part/ltl format itself is primitive
    nonempty = []
    for raw in acacia_part_text.split("\n"):
        raw = raw.strip()
        if raw:
            nonempty.append(raw)

    input_signals = _parse_signals_from_lines(
        lfilter(lambda ln: ln.startswith(".inputs"), nonempty))
    output_signals = _parse_signals_from_lines(
        lfilter(lambda ln: ln.startswith(".outputs"), nonempty))

    return input_signals, output_signals
Beispiel #3
0
def _parse_part(acacia_part_text: str):
    """Line-oriented parser for the Acacia part format.

    Returns a pair (input_signals, output_signals).
    """
    lines = [stripped
             for stripped in (raw.strip() for raw in acacia_part_text.split('\n'))
             if stripped]

    is_inputs_line = lambda ln: ln.startswith('.inputs')
    is_outputs_line = lambda ln: ln.startswith('.outputs')

    input_signals = _parse_signals_from_lines(lfilter(is_inputs_line, lines))
    output_signals = _parse_signals_from_lines(lfilter(is_outputs_line, lines))

    return input_signals, output_signals
Beispiel #4
0
def _add_spec_unit_if_necessary(acacia_ltl_text):
    """Prepend a default '[spec_unit None]' header when the text lacks one."""
    lines = [ln.strip() for ln in acacia_ltl_text.split("\n") if ln.strip()]

    # the unit header, if present, is the first non-comment line and starts with '['
    meaningful = lfilter(lambda ln: not ln.startswith("#"), lines)
    has_header = bool(meaningful) and meaningful[0].startswith("[")

    if meaningful and not has_header:
        lines = ["[spec_unit None]"] + lines

    return "\n".join(lines)
Beispiel #5
0
def _add_spec_unit_if_necessary(acacia_ltl_text):
    # Ensure the spec text starts with a '[...]' unit header;
    # insert a default one otherwise.
    cleaned = [ln.strip() for ln in acacia_ltl_text.split('\n') if ln.strip()]
    content = lfilter(lambda ln: not ln.startswith('#'), cleaned)

    needs_header = bool(content) and not content[0].startswith('[')
    result = (['[spec_unit None]'] + cleaned) if needs_header else cleaned

    return '\n'.join(result)
Beispiel #6
0
def _add_spec_unit_if_necessary(acacia_ltl_text):
    """Insert '[spec_unit None]' in front of specs that start without a unit."""
    lines = []
    for raw in acacia_ltl_text.split('\n'):
        raw = raw.strip()
        if raw:
            lines.append(raw)

    non_comments = lfilter(lambda ln: not ln.startswith('#'), lines)
    if non_comments and not non_comments[0].startswith('['):
        lines.insert(0, '[spec_unit None]')

    return '\n'.join(lines)
Beispiel #7
0
 def _assert_no_errors(self, lines):
     """Fail loudly if z3's output contains a genuine error line.

     Lines containing 'model is not available' are benign and ignored.
     Raises AssertionError carrying the last query and z3's messages.
     """
     real_error_lines = lfilter(lambda l: 'error' in l and 'model is not available' not in l, lines)
     if real_error_lines:
         msg = 'z3 found errors in query. Last piece of query: \n' \
               '{query}\n' \
               '-----------------------\n' \
               'z3 error messages:\n' \
               '{error}'.format(query=str(self._query_storage),
                                error='\n'.join(real_error_lines))
         # raise explicitly: `assert 0, msg` is silently stripped under `python -O`,
         # which would disable this safety check entirely
         raise AssertionError(msg)
Beispiel #8
0
 def _assert_no_errors(self, lines):
     """Fail loudly if z3's output contains a genuine error line.

     Lines containing 'model is not available' are benign and ignored.
     Raises AssertionError carrying the last query and z3's messages.
     """
     real_error_lines = lfilter(
         lambda l: 'error' in l and 'model is not available' not in l,
         lines)
     if real_error_lines:
         msg = 'z3 found errors in query. Last piece of query: \n' \
               '{query}\n' \
               '-----------------------\n' \
               'z3 error messages:\n' \
               '{error}'.format(query=str(self._query_storage),
                                error='\n'.join(real_error_lines))
         # raise explicitly: `assert 0, msg` is silently stripped under `python -O`,
         # which would disable this safety check entirely
         raise AssertionError(msg)
def normalize_aht_transitions(transitions:Iterable[AHTTransition]) \
        -> Set[AHTTransition]:
    """Rewrite AHT transitions so that, per source node, no two transitions
    have intersecting labels.

    Each intersecting pair (t1, t2) of a node is replaced by:
      - one transition on the common label whose destination expression is
        `t1.dst_expr | t2.dst_expr`, and
      - transitions covering the `l1 & !l2` and `!l1 & l2` remainders that
        keep the original destination expressions.
    This repeats until no node has an intersecting pair.

    Disjoining destination expressions is only sound for existential
    (non-deterministic) nodes — asserted below.
    """
    # NB: assumption: the automaton that we normalize is non-det (since we do dst_expr1 _&_ dst_expr2)
    for t in transitions:  # type: AHTTransition
        assert t.src.is_existential, "violates the method assumption"

    transitions = set(transitions)  # type: Set[AHTTransition]

    nodes = set(map(lambda t: t.src, transitions))
    for n in nodes:
        while True:
            node_transitions = lfilter(lambda t: t.src == n, transitions)
            # NOTE(review): assumes pick_two_intersecting_transitions returns
            # (None, None) when no intersecting pair exists — confirm with its definition
            t1, t2 = pick_two_intersecting_transitions(node_transitions)
            if t1 is None:
                break

            l1, l2 = t1.state_label, t2.state_label
            t_split = []  # type: List[AHTTransition]

            # the overlap l1 & l2: either destination may fire
            l1_l2 = common_label(l1, l2)
            t_l1_l2 = AHTTransition(t1.src, l1_l2, t1.dst_expr
                                    | t2.dst_expr)  # todo: non-repeating OR
            t_split.append(t_l1_l2)

            # the part of l1 outside l2 keeps t1's destinations
            nl2_labels = negate_label(l2)
            for nl2 in nl2_labels:
                l1_nl2 = common_label(l1, nl2)
                if l1_nl2 is not None:
                    t_split.append(AHTTransition(t1.src, l1_nl2, t1.dst_expr))

            # the part of l2 outside l1 keeps t2's destinations
            nl1_labels = negate_label(l1)
            for nl1 in nl1_labels:
                nl1_l2 = common_label(nl1, l2)
                if nl1_l2 is not None:
                    t_split.append(AHTTransition(t2.src, nl1_l2, t2.dst_expr))

            # NB: we can remove t1 and t2, since the newly generated transitions cover them
            transitions.remove(t1)
            transitions.remove(t2)
            # In contrast to the NBW normalization,
            # we simply call `update` since `transitions` is a _set_ of objects like (src, label, dst_expr),
            # and thus we won't lose transitions with the same `src, label` but different `dst_expr`
            transitions.update(t_split)
        # end of while True
    # end of for n in nodes

    return transitions
def normalize_nbw_transitions(node:NBWNode,
                              transitions:Dict[Label, Set[Tuple[bool, NBWNode]]])\
        -> Dict[Label, Set[Tuple[bool,NBWNode]]]:
    """Make the transition labels of `node` mutually exclusive.

    While any two labels l1, l2 intersect, the pair is replaced with:
      - common_label(l1, l2) -> union of both successor sets,
      - each satisfiable piece of l1 & !l2 -> successors of l1,
      - each satisfiable piece of !l1 & l2 -> successors of l2.

    Mutates and returns `transitions`; also re-assigns the result to
    `node._transitions` on every iteration (see FIXME below).
    """
    while True:
        # pick two intersecting 'transitions':
        all_intersecting_label_pairs = lfilter(
            lambda l_l: common_label(l_l[0], l_l[1]) is not None,
            combinations(transitions.keys(), 2))
        if not all_intersecting_label_pairs:
            break
        l1, l2 = all_intersecting_label_pairs[0]

        t_split = []  # type: List[Tuple[Label, Set[Tuple[bool, NBWNode]]]]

        # overlap: successors of both original transitions
        t_split.append((common_label(l1,
                                     l2), transitions[l1] | transitions[l2]))

        # the part of l1 outside l2 keeps l1's successors
        nl2_labels = negate_label(l2)
        for nl2 in nl2_labels:
            l1_nl2 = common_label(l1, nl2)
            if l1_nl2 is not None:
                t_split.append((l1_nl2, transitions[l1]))

        # the part of l2 outside l1 keeps l2's successors
        nl1_labels = negate_label(l1)
        for nl1 in nl1_labels:
            nl1_l2 = common_label(nl1, l2)
            if nl1_l2 is not None:
                t_split.append((nl1_l2, transitions[l2]))

        # NB: we can remove t1 and t2, since the newly generated transitions cover them
        del transitions[l1]
        del transitions[l2]
        # Careful, we may have other transitions with exactly the same label!
        # => we do not replace but rather 'update'
        # NOTE(review): entries of t_split may alias the same set object
        # (transitions[l1]/transitions[l2] are stored by reference, not copied);
        # later `update` calls would then mutate it under several labels — verify
        # this is intended before relying on the per-label sets being independent.
        for (new_lbl, new_transitions) in t_split:
            if new_lbl in transitions:
                transitions[new_lbl].update(new_transitions)
            else:
                transitions[new_lbl] = new_transitions
        # this one is wrong!
        # transitions.update(t_split)

        node._transitions = transitions  # FIXME: fix access to the private member

    return transitions
Beispiel #11
0
    def _encode_state(self, q: Node, m: int) -> List[str]:
        """Return the SMT assertion (as a one-element list of strings) that
        encodes the firing condition of automaton state `q` in model state
        `m` — see the Encoding comment below for the exact shape.
        """
        q_transitions = lfilter(lambda t: t.src == q, self.aht_transitions)

        # Encoding:
        # - if q is existential, then one of the transitions must fire:
        #
        #     reach(q,t) ->
        #                OR{state_label \in q_transitions}: sys_out=state_label & reach(q',t')
        #
        # - if q is universal, then all transitions of that system output should fire
        #
        #     reach(q,t) ->
        #                AND{state_label \in q_transitions}: sys_out=state_label -> reach(q',t')
        #
        #

        # build s_premise `reach(q,t)`
        s_m = smt_name_m(m)
        s_q = smt_name_q(q)
        s_premise = call_func(self.reach_func_desc, {
            ARG_MODEL_STATE: s_m,
            ARG_A_STATE: s_q
        })

        # build s_conclusion `exists`
        # each pair is (SMT formula for the output labels, SMT formula for dst_expr)
        s_conclusion_out_sExpr_pairs = set()  # type: Set[Tuple[str, str]]
        for t in q_transitions:  # type: Transition
            s_t_state_label = smt_out(s_m, t.state_label, self.inputs,
                                      self.descr_by_output)
            s_dst_expr = self._translate_dst_expr_into_smt(t.dst_expr, q, m)
            s_conclusion_out_sExpr_pairs.add((s_t_state_label, s_dst_expr))

        if q.is_existential:
            # existential: conjoin label with destination (`label & reach`)
            s_conclusion_elements = lmap(lambda sce: op_and(sce),
                                         s_conclusion_out_sExpr_pairs)
        else:
            # universal: implication (`label -> reach`)
            s_conclusion_elements = lmap(
                lambda sce: op_implies(sce[0], sce[1]),
                s_conclusion_out_sExpr_pairs)

        # existential states OR their elements, universal states AND them
        s_conclusion = (op_and, op_or)[q.is_existential](s_conclusion_elements)

        s_assertion = op_implies(s_premise, s_conclusion)

        return [assertion(s_assertion)]
Beispiel #12
0
def atm_to_verilog(atm: Automaton, sys_inputs: Iterable[Signal],
                   sys_outputs: Iterable[Signal], module_name: str,
                   bad_out_name: str) -> str:
    """Translate automaton `atm` into a Verilog monitor module.

    The module reads the system inputs and the system outputs (the latter
    prefixed with 'controllable_') and raises `bad_out_name` exactly when
    the automaton's unique final sink state is reached.

    Raises AssertionError unless `atm` has exactly one final-sink node.
    """
    # compute the final sinks once (was previously filtered twice)
    final_sinks = lfilter(lambda n: is_final_sink(n), atm.nodes)
    assert len(final_sinks) == 1,\
        'for now I support only one bad state which must be a sink'
    sink_q = final_sinks[0]

    sys_inputs = set(sys_inputs)
    sys_outputs = set(sys_outputs)

    # signal -> verilog wire name; controllable outputs get a marker prefix
    verilog_by_sig = {s: 'controllable_' + s.name for s in sys_outputs}
    verilog_by_sig.update({s: s.name for s in sys_inputs})

    verilog_by_node = {q: '__q' + q.name for q in atm.nodes}

    module_inputs = list(
        chain(map(lambda i: i.name, sys_inputs),
              map(lambda o: 'controllable_' + o.name, sys_outputs)))

    s = StrAwareList()
    s += 'module {module_name}({inputs}, {output});'.format(
        module_name=module_name,
        inputs=', '.join(module_inputs),
        output=bad_out_name)
    s.newline()

    s += '\n'.join('input %s;' % i for i in module_inputs)
    s.newline()

    s += 'output %s;' % bad_out_name
    s.newline()

    s += '\n'.join('reg %s;' % vq for vq in verilog_by_node.values())
    s.newline()

    s += 'wire %s;' % bad_out_name
    s += 'assign {bad} = {sink_q};'.format(bad=bad_out_name,
                                           sink_q=verilog_by_node[sink_q])
    s.newline()

    # initial marking: init nodes start at 1, all others at 0
    s += 'initial begin'
    s += '\n'.join('%s = 1;' % verilog_by_node[iq] for iq in atm.init_nodes)
    s += '\n'.join('%s = 0;' % verilog_by_node[q]
                   for q in atm.nodes - atm.init_nodes)
    s += 'end'
    s.newline()

    s += 'always@($global_clock)'
    s += 'begin'

    def lbl_to_v(lbl: Label) -> str:
        # render a label (signal -> bool) as a conjunction of (negated) names
        return ' && '.join(
            ('!%s', '%s')[lbl[s]] % verilog_by_sig[s] for s in lbl) or '1'

    for q in atm.nodes:
        incoming_edges = incoming_transitions(q, atm)
        if not incoming_edges:
            update_expr = '0'  # no incoming edges: the state is never entered
        else:
            update_expr = ' || '.join('{src} && {lbl}'.format(
                src=verilog_by_node[edge[0]], lbl=lbl_to_v(edge[1]))
                                      for edge in incoming_edges)
        s += '  {q} <= {update_expr};'.format(q=verilog_by_node[q],
                                              update_expr=update_expr)
    s += 'end'
    s += 'endmodule'
    return s.to_str()
Beispiel #13
0
def op_or(arguments):
    """Build a disjunction, dropping `false()` operands; empty means false()."""
    non_false = lfilter(lambda a: a != false(), arguments)
    return make_and_or_xor(non_false, 'or') if non_false else false()
Beispiel #14
0
def op_and(arguments):
    """Build a conjunction, dropping `true()` operands; empty means true()."""
    non_true = lfilter(lambda a: a != true(), arguments)
    return make_and_or_xor(non_true, 'and') if non_true else true()