def parse_acacia_and_build_expr(ltl_text: str, part_text: str,
                                ltl_to_atm: LTLToAutomaton,
                                strengthen_lvl) -> Spec:
    """ Note: parses and strengthens the formula. """
    input_signals, output_signals, data_by_unit = acacia_parser.parse(ltl_text, part_text)
    assert data_by_unit is not None

    ltl_properties = []
    for (unit_name, unit_data) in data_by_unit.items():
        assumptions = unit_data[0]
        guarantees = unit_data[1]

        a_safeties, a_livenesses = split_safety_liveness(assumptions, ltl_to_atm)
        g_safeties, g_livenesses = split_safety_liveness(guarantees, ltl_to_atm)

        if strengthen_lvl == 2:
            ltl_property = strengthen2([], [], a_safeties, g_safeties, a_livenesses, g_livenesses)
        elif strengthen_lvl == 1:
            ltl_property = strengthen1([], [], a_safeties, g_safeties, a_livenesses, g_livenesses)
        else:
            ltl_property = and_expr(list(a_safeties) + list(a_livenesses)) \
                           >> and_expr(list(g_safeties) + list(g_livenesses))

        ltl_properties.append(ltl_property)

    return Spec(input_signals, output_signals, and_expr(ltl_properties))
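# A hedged usage sketch (not from the original sources): the exact textual syntax is
# whatever acacia_parser.parse accepts (Acacia-style .ltl and .part files). The file
# names and the `ltl_to_atm` instance below are assumptions, not part of this module.
#
#     ltl_text = open('demo.ltl').read()    # hypothetical spec file in Acacia format
#     part_text = open('demo.part').read()  # hypothetical signal partition (.inputs/.outputs)
#     spec = parse_acacia_and_build_expr(ltl_text, part_text, ltl_to_atm, strengthen_lvl=0)
#     # with strengthen_lvl == 0, spec.formula is simply
#     # (safety & liveness assumptions) >> (safety & liveness guarantees)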
def check_unreal(ltl_text: str, part_text: str, is_moore: bool,
                 ltl_to_atm: LTLToAutomaton,
                 min_k, max_k,
                 opt_level: int) -> str or None:
    spec = parse_acacia_and_build_expr(ltl_text, part_text, ltl_to_atm, opt_level)
    assert BAD_OUT_NAME not in (spec.inputs | spec.outputs), 'name collision'

    # dualize the game: negate the formula, swap inputs and outputs,
    # and flip the machine type (Moore <-> Mealy)
    neg_spec = Spec(spec.outputs, spec.inputs, ~spec.formula)
    neg_is_moore = not is_moore

    return _check_real(neg_spec, neg_is_moore, ltl_to_atm, min_k, max_k)
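# A hedged usage sketch (not from the original sources): running the unrealizability
# check. `ltl_text`, `part_text`, and `ltl_to_atm` are assumed to exist as in the sketch
# above; the bounds and opt_level are arbitrary.
#
#     counter_strategy = check_unreal(ltl_text, part_text, is_moore=True,
#                                     ltl_to_atm=ltl_to_atm,
#                                     min_k=1, max_k=8, opt_level=0)
#     if counter_strategy is not None:
#         print('unrealizable: found a strategy for the environment')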
def check_real(spec: Spec,
               min_size, max_size,
               ltl_to_atm: LTLToAutomaton,
               solver_factory: Z3SolverFactory,
               use_direct_encoding: bool) -> LTS or None:
    shared_aht, dstFormPropMgr = SharedAHT(), DstFormulaPropMgr()

    # normalize the formula (negations appear only in front of basic propositions)
    spec.formula = NNFNormalizer().dispatch(spec.formula)
    logging.info("CTL* formula size: %i", expr_size(spec.formula))

    if use_direct_encoding:
        top_formula, atm_by_p, UCWs = automize_ctl(spec.formula, ltl_to_atm)
        logging.info("Total number of states in sub-automata: %i",
                     sum(len(a.nodes) for a in atm_by_p.values()))
        for p, atm in atm_by_p.items():
            logging.debug(str(p) + ', atm: \n' + automaton_to_dot.to_dot(atm))
        encoder = CTLEncoderDirect(top_formula, atm_by_p, UCWs,
                                   build_tau_desc(spec.inputs),
                                   spec.inputs,
                                   dict((o, build_output_desc(o, True, spec.inputs))
                                        for o in spec.outputs),
                                   range(max_size))
    else:
        aht_automaton = ctl2aht.ctl2aht(spec, ltl_to_atm, shared_aht, dstFormPropMgr)
        aht_nodes, aht_transitions = get_reachable_from(aht_automaton.init_node,
                                                        shared_aht.transitions,
                                                        dstFormPropMgr)
        logging.info('The AHT automaton size (nodes/transitions) is: %i/%i' %
                     (len(aht_nodes), len(aht_transitions)))
        if not aht_transitions:
            logging.info('AHT is empty => the spec is unrealizable!')
            return None
        if logging.getLogger().isEnabledFor(logging.DEBUG):  # aht2dot takes a long time
            logging.debug('AHT automaton (dot) is...\n')
            logging.debug(aht2dot.convert(aht_automaton, shared_aht, dstFormPropMgr))
        encoder = CTLEncoderViaAHT(aht_automaton, aht_transitions,
                                   dstFormPropMgr,
                                   build_tau_desc(spec.inputs),
                                   spec.inputs,
                                   dict((o, build_output_desc(o, True, spec.inputs))
                                        for o in spec.outputs),
                                   range(max_size))

    model = model_searcher.search(min_size, max_size, encoder, solver_factory.create())
    return model
def test_ctl2aht(self):
    """ 'Crash' test: no assertion should fail """
    rs, r = sig_prop('r')
    cs, c = sig_prop('c')
    gs, g = sig_prop('g')
    formulas = [
        AG(r >> F(g)),
        AG(r >> F(g)) & EGF(~g),
        AG(r >> F(g)) & EFG(g) & AFEG(~g),
        AG(r >> X(g & X(g & EGF(g) & EGF(~g)))),
        # AG(r >> F(g | c) & EGF(g) & EF(c) & EF(~c) & EGF(r)),  # TODO: why are these tests so slow?
        AG(EFG(r & g)),
        # AG(EFG(r & g)) | AFEG(g),
        AG(~r >> F(~g)) & AG(~r >> F(~g)) & EFG(g) & AFEG(~g),
        A(r),
        A(r) & A(~r),
        g,
        Bool(False),
        Bool(True)
    ]
    i = 0
    for f in formulas:
        i += 1
        print('checking: ' + str(f))
        dstFormPropMgr = DstFormulaPropMgr()
        shared_aht = SharedAHT()
        spec = Spec([rs, cs], [gs], f)
        ctl2aht(spec, self.ltl2ba, shared_aht, dstFormPropMgr)
        with tempfile.NamedTemporaryFile(delete=False) as dot_file:
            dot = aht2dot.convert(None, shared_aht, dstFormPropMgr)
            dot_file.write(dot.encode())
            # with open('/tmp/ttmmpp%i.dot' % i, 'w') as output:
            #     output.write(dot)
        os.remove(dot_file.name)
#    EF(o1&o2))

# spec3:
# diff: it has AGEF instead of E(GF..GF..)
# formula = AG((i1&i2) >> F(o1 & o2)) & \
#           AG((i1&i2&o1&o2) >> X(o1&o2)) & \
#           AG(EF(o1&~o2) & \
#              EF(~o1&o2) & \
#              EF(~o1&~o2) & \
#              EF(o1&o2))

# spec2:
# it says that a single path should raise all combinations of o1, o2
# (rather than allowing different paths to do so)
# formula = AG((i1&i2) >> F(o1 & o2)) & \
#           AG((i1&i2&o1&o2) >> X(o1&o2)) & \
#           E(GF(o1&~o2) & \
#             GF(~o1&o2) & \
#             GF(~o1&~o2) & \
#             GF(o1&o2))

# spec1 (it has model 1)
# formula = AG((i1&i2) >> F(o1 & o2)) & \
#           AG((i1&i2&o1&o2) >> X(o1&o2)) & \
#           EGF(o1&~o2) & \
#           EGF(~o1&o2) & \
#           EGF(~o1&~o2) & \
#           EGF(o1&o2)

spec = Spec(inputs, outputs, formula)
def convert(spec: Spec,
            nof_IDs: int or None,
            ltl_to_atm: LTLToAutomaton) -> Spec:
    # (E.g. EG¬g ∧ AGEF¬g ∧ EFg)
    # The algorithm is:
    #   collect all E-subformulas
    #   for each unique E-subformula:
    #       introduce a new v-variable if not yet introduced
    #   if nof_IDs is not given:
    #       nof_IDs = the number of states in all existential automata
    #   # nof_IDs defines the range of every v-variable
    #   introduce nof_IDs d-variables (each is a valuation of all inputs)
    #   for each unique A-subformula:
    #       introduce a p-variable
    #   for each unique A-subformula:
    #       create an LTL formula (0)
    #   for each unique E-subformula:
    #       create an LTL formula (1)
    #   create the top-level formula (2)
    #   create the conjunction (0) & (1) & (2)
    #   # (nope, we don't do this) inline back A-subformulas (replace their p by the path formulas without 'A')
    #   replace existential propositions by 'v != 0'
    #   return the result

    spec = Spec(spec.inputs, spec.outputs, NNFNormalizer().dispatch(spec.formula))

    atomizer = CTLAtomizerVisitor('__p')
    top_formula = atomizer.dispatch(spec.formula)

    exist_props = [p for (p, f) in atomizer.f_by_p.items() if f.name == 'E']

    if nof_IDs is None:
        atm_by_exist_p = dict((p, ltl_to_atm.convert(atomizer.f_by_p[p].arg, '__q_' + p.arg1.name))
                              for p in exist_props)
        nof_IDs = sum(len(atm.nodes) for atm in atm_by_exist_p.values())
    logging.info("k = %i", nof_IDs)

    v_bits_by_exist_p = dict(
        (p, tuple(reversed([Signal('__v%s_%i' % (p.arg1.name.replace('_', ''), i))
                            for i in range(ceil(log(nof_IDs + 1, 2)) or 1)])))  # NB: +1 to account for 0
        for p in exist_props)  # type: Dict[BinOp, SignalsTuple]

    ordered_inputs = tuple(spec.inputs)  # type: SignalsTuple

    dTuple_by_id = dict((j, tuple(Signal('__d%i_%s' % (j, i)) for i in ordered_inputs))
                        for j in range(1, nof_IDs + 1))  # type: Dict[int, SignalsTuple]

    univ_props_to_inline = set(atomizer.f_by_p.keys()) - \
                           _calc_nested_props(atomizer.f_by_p) - \
                           set(exist_props)

    ltl_formula = top_formula  # such props can only be mentioned in the top_formula
    ltl_formula = _inline_univ_p(ltl_formula,
                                 dict((p, atomizer.f_by_p[p]) for p in univ_props_to_inline))

    ltl_formula &= _conjunction(
        _create_LTL_for_A_formula(p, atomizer.f_by_p[p].arg)
        for p in set(atomizer.f_by_p) - set(exist_props) - univ_props_to_inline)

    ltl_formula &= _conjunction(
        _create_LTL_for_E_formula(v_bits_by_exist_p[p], atomizer.f_by_p[p].arg,
                                  dTuple_by_id, ordered_inputs)
        for p in exist_props)

    ltl_formula = _replace_exist_propositions(ltl_formula, v_bits_by_exist_p, nof_IDs)

    logging.debug("exist propositions: \n%s",
                  pformat([ep.arg1 for ep in v_bits_by_exist_p]))

    new_outputs = list(chain(*v_bits_by_exist_p.values())) + \
                  list(chain(*dTuple_by_id.values())) + \
                  list(p.arg1 for p in set(atomizer.f_by_p) - set(exist_props) - univ_props_to_inline)

    spec = Spec(spec.inputs, set(new_outputs) | spec.outputs, ltl_formula)
    return spec
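# A hedged usage sketch (not from the original sources): reducing a small CTL* spec to an
# LTL spec over an enlarged output alphabet. `sig_prop` and the AG/F/EGF helpers are the
# ones used in the test above; `ltl_to_atm` is an existing LTLToAutomaton instance.
#
#     rs, r = sig_prop('r')
#     gs, g = sig_prop('g')
#     ctl_spec = Spec([rs], [gs], AG(r >> F(g)) & EGF(~g))
#     ltl_spec = convert(ctl_spec, None, ltl_to_atm)  # nof_IDs derived from the E-sub-automata sizes
#     # ltl_spec.outputs now additionally contains the fresh __v*/__d* signals
#     # (and __p* signals for A-subformulas that were not inlined)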