def __init__(self, teacher: Teacher):
    """Initialise the observation table (S, E, T) for this learner.

    :param teacher: oracle answering membership and equivalence queries;
        also supplies the input alphabet.
    """
    super().__init__(teacher)

    # Observation table (S, E, T).
    # NotifierSets raise a flag once they're modified.
    # This is used to avoid repeating expensive computations.
    self.S = NotifierSet()
    self.E = NotifierSet()

    # S starts with the empty string
    self.S.add(tuple())

    # T: query cache mapping sequences to observed outputs
    self.T = {}

    # Alphabet A — each symbol wrapped in a 1-tuple so sequences concatenate
    self.A = {(x,) for x in teacher.get_alphabet()}

    # at the start, E = A
    for a in self.A:
        self.E.add(a)

    # Don't redo expensive computations unless necessary
    self._mem = {}
    self._watch = {}

    # Checkpointing is off by default; name/dir are set when it is enabled
    self._save_checkpoints = False
    self._checkpointname = None
    self._checkpointdir = None
def __init__(self, teacher: Teacher):
    """Initialise learner state: access sequences, discrimination tree, cache.

    :param teacher: oracle answering queries; also supplies the input alphabet.
    """
    super().__init__(teacher)

    # Access sequences S + state bookkeeping.
    # The empty access sequence maps to the initial state s0.
    self.S = {tuple(): State("s0")}

    # Discrimination tree, rooted at the initial state
    self.DTree = DTree(self.S[tuple()])

    # Query cache
    self.T = {}

    # Alphabet A — each symbol wrapped in a 1-tuple so sequences concatenate
    self.A = {(x,) for x in teacher.get_alphabet()}
def __init__(self, teacher: Teacher):
    """Initialise the observation table (S, E, T) for this learner.

    :param teacher: oracle answering queries; also supplies the input alphabet.
    """
    super().__init__(teacher)

    # Observation table (S, E, T); both prefix and suffix sets start
    # with the empty string.
    self.S = set()
    self.E = set()
    self.S.add(tuple())
    self.E.add(tuple())

    # Query cache
    self.T = {}

    # Alphabet A — each symbol wrapped in a 1-tuple so sequences concatenate
    self.A = {(x,) for x in teacher.get_alphabet()}
horizon=12, stop_on={'invalid_input'}, stop_on_startswith={'error'}, order_type='ce count')) # Store found counterexamples def onct(ctex): ct.add(ctex) ct.save(f'counterexamples_{problem}_nomutation.p') eqc.onCounterexample(onct) # Set up the teacher, with the system under learning and the equivalence checker teacher = Teacher(sul, eqc) # Set up the learner who only talks to the teacher learner = TTTMealyLearner(teacher) # learner.enable_checkpoints('checkpoints3') # learner.load_checkpoint('/home/tom/projects/lstar/experiments/counterexampletracker/checkpoints3/cZsmSu/2020-05-06_20:00:33:790987') # Get the learners hypothesis hyp = learner.run( show_intermediate=False, render_options={'ignore_self_edges': ['error', 'invalid']}, on_hypothesis=lambda x: check_result( x, f'../../rers/TrainingSeqReachRers2019/{problem}/reachability-solution-{problem}.csv' )) print(
# Hand-built example machine: three states over the alphabet {'a', 'b'}.
s1 = State('q0')
s2 = State('q1')
s3 = State('q2')

# Transition relation as a (source, symbol, destination) table
for src, sym, dst in [
    (s1, 'a', s2),
    (s1, 'b', s1),
    (s2, 'b', s1),
    (s2, 'a', s3),
    (s3, 'a', s3),
    (s3, 'b', s1),
]:
    src.add_edge(sym, dst)

sm = DFA(s1, [s3])

# Or use a regex to define the state machine
sm = RegexMachine('(ab)+')

# We are using the brute force equivalence checker
eqc = BFEquivalenceChecker(sm, max_depth=10)

# Set up the teacher, with the system under learning and the equivalence checker
teacher = Teacher(sm, eqc)

# Set up the learner who only talks to the teacher
learner = DFALearner(teacher)

# Get the learners hypothesis and render it
hyp = learner.run(show_intermediate=True)
hyp.render_graph(tempfile.mktemp('.gv'))
from util.dotloader import load_mealy_dot

# Load the target mealy machine from a graphviz dot file and render it
path = "/home/tom/projects/lstar/rers/industrial/m54.dot"
mm = load_mealy_dot(path)
mm.render_graph(
    tempfile.mktemp('.gv'),
    render_options={'ignore_self_edges': ['error', 'invalid']},
)

# W-method equivalence checking; m bounds the number of states to consider
eqc = SmartWmethodEquivalenceChecker(
    mm,
    m=len(mm.get_states()) + 1,
    stop_on={'error'},
)
eqc.onCounterexample(lambda x: print('Counterexample:', x))

teacher = Teacher(mm, eqc)

# We are learning a mealy machine
learner = MealyLearner(teacher)
hyp = learner.run(
    show_intermediate=False,
    render_options={'ignore_self_edges': ['error', 'invalid']},
)
hyp.render_graph(tempfile.mktemp('.gv'))

# The learned hypothesis should match the target's state count exactly
assert len(hyp.get_states()) == len(mm.get_states())
print("done")
other_state.name, label=f'{action}/{output}') else: g.edge(cur_state.name, other_state.name, label=f'{action}') g.node(lastname, label=str(min(laststeps))) g.view() if __name__ == "__main__": np.random.seed(1337) tspprob = TSPProblem().make_random(4) tsp = TSPSul(tspprob, 0) tsp.calc_expected_future_len([], 1000) eqc = BFEquivalenceChecker(tsp, max_depth=6) teacher = Teacher(tsp, eqc) learner = TSPLearner(teacher, tsp=tsp) #learner = MealyLearner(teacher) hyp = learner.run(show_intermediate=True) #filter_errs(hyp) cleanup(hyp) #raw(hyp, tempfile.mktemp('.gv')) hyp.render_graph(tempfile.mktemp('.gv')) # tspprob = TSPProblem().make_random(5) # tsp = TSPSul(tspprob, 0)
[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], ]) world = World(map, rewards, endstates=[(1, 3)], initial_pos=(7, 3)) world.show() # We are using the brute force equivalence checker eqc = BFEquivalenceChecker(world, max_depth=12) # Set up the teacher, with the system under learning and the equivalence checker teacher = Teacher(world, eqc) # Set up the learner who only talks to the teacher learner = MealyLearner(teacher) # Get the learners hypothesis hyp = learner.run(show_intermediate=True) hyp.render_graph(tempfile.mktemp('.gv')) value_iterate(hyp, 0.9)
# Third state; s1 and s2 are created earlier in this script.
s3 = State('q2')

# Transition relation as a (source, symbol, destination) table
for src, sym, dst in [
    (s1, 'a', s2),
    (s1, 'b', s1),
    (s2, 'b', s3),
    (s2, 'a', s2),
    (s3, 'a', s3),
    (s3, 'b', s3),
]:
    src.add_edge(sym, dst)

dfa = DFA(s1, [s3])

# We are using the brute force equivalence checker
eqc = BFEquivalenceChecker(dfa)

# Set up the teacher, with the system under learning and the equivalence checker
teacher = Teacher(dfa, eqc)

# Set up the learner who only talks to the teacher
learner = DFALearner(teacher)

# First step -----------------------------------
# Step the learner until its table is both closed and consistent
while not (learner._is_closed() and learner._is_consistent()):
    learner.step()

hyp1 = learner.build_dfa()
hyp1.render_graph('step1', format='png')

# Find counterexample
equivalent, counterexample = learner.teacher.equivalence_query(hyp1)
print('COUNTEREXAMPLE', counterexample)