def learn_vending_machine(visualize=False):
    """
    Learn a Mealy-machine model of the vending machine SUL and replay a
    concrete input sequence on the learned model to demonstrate an error.

    :param visualize: if True, render the learned automaton
    :return: learned Mealy machine
    """
    sul = VendingMachineSUL()
    input_al = sul.alphabet

    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=50, walk_len=20)
    learned_model = run_Lstar(input_al, sul, oracle, automaton_type='mealy')

    # Example of an error: query the learned model with a fixed coin sequence.
    model_sul = MealySUL(learned_model)
    error_sequence = ('add_coin_0.2', 'add_coin_0.5', 'add_coin_0.2',
                      'add_coin_0.2', 'add_coin_0.2', 'add_coin_0.2')
    print(model_sul.query(error_sequence))

    if visualize:
        visualize_automaton(learned_model, display_same_state_trans=False)
    return learned_model
def random_moore_example(alphabet_size, number_of_states, output_size=8):
    """
    Generate a random Moore machine and learn it.

    :param alphabet_size: size of input alphabet
    :param number_of_states: number of states in generated Moore machine
    :param output_size: size of the output alphabet
    :return: learned Moore machine
    """
    alphabet = [*range(0, alphabet_size)]
    random_moore = generate_random_moore_machine(number_of_states, alphabet,
                                                 output_alphabet=list(range(output_size)))
    sul_mealy = MooreSUL(random_moore)
    # Equivalence oracle that walks from each discovered state's access prefix.
    state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=15, walk_len=20)
    learned_moore = run_Lstar(alphabet, sul_mealy, state_origin_eq_oracle, cex_processing='rs',
                              closing_strategy='single', automaton_type='moore',
                              cache_and_non_det_check=True)
    return learned_moore
def extract_finite_state_transducer(rnn, input_alphabet, output_al, max_learning_rounds=10,
                                    formalism='mealy', print_level=2):
    """
    Extract a finite-state transducer (Mealy or Moore machine) from a trained RNN.

    :param rnn: trained recurrent network serving as the system under learning
    :param input_alphabet: inputs accepted by the RNN
    :param output_al: output alphabet used to build the token-to-symbol mapping
    :param max_learning_rounds: cap on L* learning rounds
    :param formalism: 'mealy' or 'moore'
    :param print_level: verbosity passed to the learner
    :return: learned automaton
    """
    assert formalism in ['mealy', 'moore']

    # Invert the tokenization so integer outputs map back to symbols.
    outputs_2_ints = {integer: output for output, integer in tokenized_dict(output_al).items()}

    sul = RnnMealySUL(rnn, outputs_2_ints)
    eq_oracle = StatePrefixEqOracle(input_alphabet, sul, walks_per_state=150, walk_len=25)

    return run_Lstar(alphabet=input_alphabet, sul=sul, eq_oracle=eq_oracle,
                     automaton_type=formalism, cache_and_non_det_check=False,
                     max_learning_rounds=max_learning_rounds,
                     suffix_closedness=True, print_level=print_level)
def random_mealy_example(alphabet_size, number_of_states, output_size=8):
    """
    Generate a random Mealy machine and learn it.

    :param alphabet_size: size of input alphabet
    :param number_of_states: number of states in generated Mealy machine
    :param output_size: size of the output alphabet
    :return: learned Mealy machine
    """
    alphabet = [*range(0, alphabet_size)]
    random_mealy = generate_random_mealy_machine(number_of_states, alphabet,
                                                 output_alphabet=list(range(output_size)))
    sul_mealy = MealySUL(random_mealy)
    random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, 5000)
    # Fix: removed an unused StatePrefixEqOracle that was constructed here
    # but never passed to the learner (dead local).
    learned_mealy = run_Lstar(alphabet, sul_mealy, random_walk_eq_oracle,
                              automaton_type='mealy', cex_processing='longest_prefix')
    return learned_mealy
def train_and_extract_tomita(tomita_grammar, acc_stop=1., loss_stop=0.005, load=False):
    """
    Train (or load) an RNN on a Tomita grammar, extract a DFA from it with L*,
    save the DFA to file and visualize it.

    :param tomita_grammar: index of the Tomita grammar
    :param acc_stop: training accuracy at which training stops
    :param loss_stop: training loss at which training stops
    :param load: if True, load a previously trained model instead of training
    :return: the extracted DFA
    """
    tomita_alphabet = ["0", "1"]

    if not load:
        rnn = train_RNN_on_tomita_grammar(tomita_grammar, acc_stop=acc_stop, loss_stop=loss_stop)
    else:
        # Build the network untrained, then restore saved weights.
        rnn = train_RNN_on_tomita_grammar(tomita_grammar, train=False)
        rnn.load(f"RNN_Models/tomita_{tomita_grammar}.model")

    sul = RnnBinarySUL(rnn)
    alphabet = tomita_alphabet

    state_eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=1000, walk_len=5)

    dfa = run_Lstar(alphabet=alphabet, sul=sul, eq_oracle=state_eq_oracle,
                    automaton_type='dfa', cache_and_non_det_check=True)

    save_automaton_to_file(dfa, f'LearnedAutomata/learned_tomita{tomita_grammar}')
    visualize_automaton(dfa)
    # Fix: return the learned DFA so callers can use it (previously returned None).
    return dfa
def generate_data_based_on_characterization_set(automaton, automaton_type='mealy'):
    """
    Generate tokenized training data (sequences and last-output labels) from an
    automaton by learning it with L* and combining state access prefixes with
    the characterization set returned by the learner.

    :param automaton: automaton to generate data from (must expose
        get_input_alphabet(); its states expose .prefix and .output_fun)
    :param automaton_type: 'dfa' or 'mealy'; selects the SUL wrapper
    :return: (train_seq, train_labels) — tokenized input sequences and labels
    """
    from aalpy.SULs import MealySUL, DfaSUL
    from aalpy.oracles import RandomWalkEqOracle
    from aalpy.learning_algs import run_Lstar
    # automaton = load_automaton_from_file(path_to_automaton, automaton_type)
    alphabet = automaton.get_input_alphabet()
    eq_oracle = RandomWalkEqOracle(alphabet, automaton, num_steps=5000, reset_prob=0.09,
                                   reset_after_cex=True)
    sul = DfaSUL(automaton) if automaton_type == 'dfa' else MealySUL(automaton)
    # Re-learn the automaton to obtain the characterization set (E-set).
    # NOTE: `automaton` is rebound to the learned model from here on.
    automaton, data = run_Lstar(alphabet, sul, eq_oracle, automaton_type=automaton_type,
                                print_level=0, return_data=True, suffix_closedness=True)
    characterization_set = data['characterization set']
    prefixes = [state.prefix for state in automaton.states]
    # Base sequences: every access prefix followed by every distinguishing suffix.
    sequences = [p + e for e in characterization_set for p in prefixes]
    # Extend with prefix + single input + suffix (transition coverage).
    sequences.extend([p + tuple([i]) + e for p in prefixes
                      for i in automaton.get_input_alphabet()
                      for e in characterization_set])
    # sequences.extend([p + e for p in sequences for e in characterization_set])
    # One round of growing existing sequences by an input and a suffix.
    # NOTE(review): extends the list being iterated via a comprehension built
    # from its current contents — the order of these steps matters.
    for _ in range(1):
        sequences.extend([p + tuple([i]) + e for p in sequences
                          for i in automaton.get_input_alphabet()
                          for e in characterization_set])
    # Duplicate the data set 3 times (8x size) — presumably to weight it for
    # training; verify this is intentional.
    for _ in range(3):
        sequences.extend(sequences)
    # Label each sequence with the last output observed when querying the SUL.
    labels = [sul.query(s)[-1] for s in sequences]
    sequences = [list(s) for s in sequences]
    input_al = automaton.get_input_alphabet()
    output_al = {output for state in automaton.states for output in state.output_fun.values()}
    input_dict = tokenized_dict(input_al)
    out_dict = tokenized_dict(output_al)
    train_seq = [seq_to_tokens(word, input_dict) for word in sequences]
    train_labels = [seq_to_tokens(word, out_dict) for word in labels]
    return train_seq, train_labels
def test_closing_strategies(self):
    """L* must learn the Angluin DFA for every closing strategy and formalism."""
    dfa = get_Angluin_dfa()
    alphabet = dfa.get_input_alphabet()

    for formalism in ['dfa', 'mealy', 'moore']:
        for strategy in ['shortest_first', 'longest_first', 'single']:
            sul = DfaSUL(dfa)
            oracle = RandomWalkEqOracle(alphabet, sul, 1000)

            learned = run_Lstar(alphabet, sul, oracle, automaton_type=formalism,
                                closing_strategy=strategy, cache_and_non_det_check=True,
                                cex_processing='rs', print_level=0)

            assert self.prove_equivalence(learned)
def test_suffix_closedness(self):
    """L* must learn the Angluin DFA with and without suffix closedness."""
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()

    for formalism in ['dfa', 'mealy', 'moore']:
        for suffix_closed in (True, False):
            sul = DfaSUL(angluin_example)
            oracle = RandomWalkEqOracle(alphabet, sul, 500)

            learned = run_Lstar(alphabet, sul, oracle, automaton_type=formalism,
                                suffix_closedness=suffix_closed,
                                cache_and_non_det_check=True,
                                cex_processing='rs', print_level=0)

            assert self.prove_equivalence(learned)
def test_cex_processing(self):
    """L* must learn the Angluin DFA with every counterexample-processing mode."""
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()

    for formalism in ['dfa', 'mealy', 'moore']:
        for cex_mode in (None, 'longest_prefix', 'rs'):
            sul = DfaSUL(angluin_example)
            oracle = RandomWalkEqOracle(alphabet, sul, 500)

            learned = run_Lstar(alphabet, sul, oracle, automaton_type=formalism,
                                cache_and_non_det_check=True,
                                cex_processing=cex_mode, print_level=0)

            assert self.prove_equivalence(learned)
def learn_diff_drive_robot(visualize=False):
    """
    Learn a Mealy model of the differential-drive robot, whose inputs are
    wheel-speed pairs plus injected fault commands.

    :param visualize: if True, render the learned automaton
    :return: learned Mealy machine
    """
    fault_inputs = ['left_faster', 'left_slower', 'left_stuck',
                    'right_faster', 'right_slower', 'right_stuck']
    wheel_inputs = [(0, 0), (0, 2), (2, 0), (2, 2), (0, -2),
                    (2, -2), (-2, 0), (-2, 2), (-2, -2)]

    # Input alphabet combines wheel-speed commands and fault injections.
    input_al = list(wheel_inputs) + fault_inputs

    sul = StrongFaultRobot(upper_speed_limit=10)
    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=20, walk_len=15)
    learned_model = run_Lstar(input_al, sul, oracle, automaton_type='mealy')

    if visualize:
        visualize_automaton(learned_model, display_same_state_trans=False)
    return learned_model
def learn_python_class():
    """
    Learn a Mealy machine where inputs are methods (with arguments) of the
    class that serves as the SUL, then visualize it.

    :return: None (the learned automaton is visualized)
    """
    # The class under learning.
    mqtt = MockMqttExample

    # Each input is a method call, optionally with fixed arguments.
    input_al = [FunctionDecorator(mqtt.connect),
                FunctionDecorator(mqtt.disconnect),
                FunctionDecorator(mqtt.subscribe, 'topic'),
                FunctionDecorator(mqtt.unsubscribe, 'topic'),
                FunctionDecorator(mqtt.publish, 'topic')]

    sul = PyClassSUL(mqtt)
    oracle = StatePrefixEqOracle(input_al, sul, walks_per_state=20, walk_len=20)

    mealy = run_Lstar(input_al, sul, eq_oracle=oracle, automaton_type='mealy',
                      cache_and_non_det_check=True)

    visualize_automaton(mealy)
def test_all_configuration_combinations(self):
    """
    Exhaustive smoke test: learn the Angluin DFA under every combination of
    formalism, closing strategy, cex processing, suffix closedness, caching,
    and equivalence oracle, and prove each learned model equivalent.
    """
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()
    automata_type = ['dfa', 'mealy', 'moore']
    closing_strategies = ['shortest_first', 'longest_first', 'single']
    cex_processing = [None, 'longest_prefix', 'rs']
    suffix_closedness = [True, False]
    caching = [True, False]
    for automata in automata_type:
        for closing in closing_strategies:
            for cex in cex_processing:
                for suffix in suffix_closedness:
                    for cache in caching:
                        # A fresh SUL per configuration; oracles are built
                        # against it and later re-pointed to a new SUL below.
                        sul = DfaSUL(angluin_example)
                        random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul, 5000,
                                                                   reset_after_cex=True)
                        state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul,
                                                                     walks_per_state=10, walk_len=50)
                        tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=200,
                                                                   walk_len=30, same_state_prob=0.3)
                        w_method_eq_oracle = WMethodEqOracle(alphabet, sul,
                                                             max_number_of_states=len(angluin_example.states))
                        random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul,
                                                                          walks_per_state=10, walk_len=50)
                        bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul, 3)
                        random_word_eq_oracle = RandomWordEqOracle(alphabet, sul)
                        cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul)
                        kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul)
                        oracles = [random_walk_eq_oracle, random_word_eq_oracle, random_W_method_eq_oracle,
                                   kWayStateCoverageEqOracle, cache_based_eq_oracle, bf_exploration_eq_oracle,
                                   tran_cov_eq_oracle, w_method_eq_oracle, state_origin_eq_oracle]
                        # The cache-based oracle only makes sense when caching is on.
                        if not cache:
                            oracles.remove(cache_based_eq_oracle)
                        for oracle in oracles:
                            # Fresh SUL per run; re-point the oracle at it so
                            # query counts/caches start clean.
                            sul = DfaSUL(angluin_example)
                            oracle.sul = sul
                            learned_model = run_Lstar(alphabet, sul, oracle, automaton_type=automata,
                                                      closing_strategy=closing, suffix_closedness=suffix,
                                                      cache_and_non_det_check=cache, cex_processing=cex,
                                                      print_level=0)
                            is_eq = self.prove_equivalence(learned_model)
                            if not is_eq:
                                # Print the failing configuration before failing.
                                print(oracle, automata)
                                assert False
    assert True
def learn_date_validator():
    """
    Learn a DFA model of the black-box DateValidator (date format %d/%m/%Y)
    and visualize it.
    """
    from aalpy.base import SUL
    from aalpy.utils import visualize_automaton, DateValidator
    from aalpy.oracles import StatePrefixEqOracle
    from aalpy.learning_algs import run_Lstar

    class DateSUL(SUL):
        """
        An example implementation of a system under learning that
        can be used to learn the behavior of the date validator.
        """

        def __init__(self):
            super().__init__()
            # DateValidator is a black-box class used for date string verification
            # The format of the dates is %d/%m/%Y'
            # Its method is_date_accepted returns True if date is accepted, False otherwise
            self.dv = DateValidator()
            self.string = ""

        def pre(self):
            # reset the string used for testing
            self.string = ""

        def post(self):
            pass

        def step(self, letter):
            # add the input to the current string
            if letter is not None:
                self.string += str(letter)
            # test if the current string is accepted
            return self.dv.is_date_accepted(self.string)

    # instantiate the SUL
    sul = DateSUL()

    # define the input alphabet: all ten digits plus the separator.
    # Fix: range(0, 9) excluded the digit 9, so dates containing '9'
    # (e.g. any 199x/9th-day/9th-month date) could never be explored.
    alphabet = list(range(0, 10)) + ['/']

    # define an equivalence oracle
    eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=500, walk_len=15)

    # run the learning algorithm
    learned_model = run_Lstar(alphabet, sul, eq_oracle, automaton_type='dfa')

    # visualize the automaton
    visualize_automaton(learned_model)
def random_dfa_example(alphabet_size, number_of_states, num_accepting_states=1):
    """
    Generate a random DFA and learn it.

    :param alphabet_size: size of the input alphabet (letters a, b, c, ...)
    :param number_of_states: number of states in the generated DFA
    :param num_accepting_states: number of accepting states
    :return: learned DFA
    """
    assert num_accepting_states <= number_of_states

    alphabet = list(string.ascii_letters[:26])[:alphabet_size]
    random_dfa = generate_random_dfa(number_of_states, alphabet, num_accepting_states)
    # visualize_automaton(random_dfa, path='correct')
    sul_dfa = DfaSUL(random_dfa)

    # Examples of the various equivalence oracles available; only the
    # random-walk oracle is actually used for learning below.
    random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul_dfa, 5000)
    state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul_dfa, walks_per_state=10, walk_len=50)
    tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul_dfa, num_random_walks=200,
                                               walk_len=30, same_state_prob=0.3)
    w_method_eq_oracle = WMethodEqOracle(alphabet, sul_dfa, max_number_of_states=number_of_states)
    random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul_dfa,
                                                      walks_per_state=10, walk_len=50)
    bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul_dfa, 5)
    random_word_eq_oracle = RandomWordEqOracle(alphabet, sul_dfa)
    cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul_dfa)
    user_based_eq_oracle = UserInputEqOracle(alphabet, sul_dfa)
    kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul_dfa)

    learned_dfa = run_Lstar(alphabet, sul_dfa, random_walk_eq_oracle, automaton_type='dfa',
                            cache_and_non_det_check=False, cex_processing='rs')

    # visualize_automaton(learned_dfa)
    return learned_dfa
def learn_light_switch(visualize=False):
    """
    Learn a Moore model of the light switch SUL.

    :param visualize: if True, render the learned automaton
    :return: learned Moore machine
    """
    input_al = ['press', 'increase_delay', 'fix_delay']  # 'fix_delay'
    sul = LightSwitchSUL()

    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=20, walk_len=15)
    model = run_Lstar(input_al, sul, oracle, automaton_type='moore')

    if visualize:
        visualize_automaton(model, display_same_state_trans=False)
    return model
def train_and_extract_bp(path="TrainingDataAndAutomata/balanced()_1.txt", load=False):
    """
    Train (or load) a GRU classifier on balanced-parentheses data, extract a
    DFA from it with L*, save the DFA to file and return it.

    :param path: training data file; the character at path[-5] is used as a
        data-set index for model/automaton file names — assumes the file name
        ends with '<index>.txt' (TODO confirm for other paths)
    :param load: if True, load a previously trained RNN instead of training
    :return: extracted DFA
    """
    bp_alphabet = list(string.ascii_lowercase + "()")

    x, y = parse_data(path)
    x_train, y_train, x_test, y_test = preprocess_binary_classification_data(x, y, bp_alphabet)

    # CHANGE PARAMETERS OF THE RNN if you want
    rnn = RNNClassifier(bp_alphabet, output_dim=2, num_layers=2, hidden_dim=50,
                        x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,
                        batch_size=18, nn_type="GRU")

    # Single character used to index saved models, e.g. '1' for "...()_1.txt".
    data_index = path[-5]
    if not load:
        rnn.train(stop_acc=1., stop_epochs=3, verbose=True)
        rnn.save(f"RNN_Models/balanced_parentheses{data_index}.rnn")
    else:
        rnn.load(f"RNN_Models/balanced_parentheses{data_index}.rnn")

    sul = RnnBinarySUL(rnn)
    alphabet = bp_alphabet

    # Transition-focused oracle: biased towards exploring state-changing inputs.
    state_eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=500,
                                            walk_len=30, same_state_prob=0.3)

    dfa = run_Lstar(alphabet=alphabet, sul=sul, eq_oracle=state_eq_oracle,
                    automaton_type='dfa', cache_and_non_det_check=False,
                    max_learning_rounds=5)

    save_automaton_to_file(dfa, f'LearnedAutomata/balanced_parentheses{data_index}')
    return dfa
def learn_wind_turbine(visualize=False):
    """
    Learn a Mealy model of the wind turbine SUL, including fault inputs.

    :param visualize: if True, render the learned automaton
    :return: learned Mealy machine
    """
    input_al = ['increase_speed', 'stop_turbine',
                'unexpected_speed_increase', 'unexpected_slow_down']
    sul = TurbineSUL()

    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=20, walk_len=15)
    model = run_Lstar(input_al, sul, oracle, automaton_type='mealy')

    if visualize:
        visualize_automaton(model, display_same_state_trans=False)
    return model
def regex_example(regex, alphabet):
    """
    Learn a regular expression as a DFA.

    :param regex: regex to learn
    :param alphabet: alphabet of the regex
    :return: DFA representing the regex
    """
    from aalpy.SULs import RegexSUL
    from aalpy.oracles import StatePrefixEqOracle
    from aalpy.learning_algs import run_Lstar

    sul = RegexSUL(regex)
    oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=2000, walk_len=15)

    return run_Lstar(alphabet, sul, oracle, automaton_type='dfa')
def learn_language_of_coffee_machine_error(visualize=False):
    """
    Learn a DFA accepting the input sequences that trigger the faulty
    coffee machine's error.

    :param visualize: if True, render the learned automaton
    :return: learned DFA
    """
    sul = FaultyCoffeeMachineSULDFA()
    input_al = ['coin', 'button']

    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=5000, walk_len=20)
    model = run_Lstar(input_al, sul, oracle, automaton_type='dfa',
                      cache_and_non_det_check=True)

    if visualize:
        visualize_automaton(model, display_same_state_trans=True)
    return model
def tomita_example(tomita_number):
    """
    Learn the DFA of a Tomita grammar.

    :param tomita_number: index of the desired Tomita grammar
    :rtype: Dfa
    :return: DFA representing the Tomita grammar
    """
    from aalpy.SULs import TomitaSUL
    from aalpy.learning_algs import run_Lstar
    from aalpy.oracles import StatePrefixEqOracle

    sul = TomitaSUL(tomita_number)
    alphabet = [0, 1]
    oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=50, walk_len=10)

    return run_Lstar(alphabet, sul, oracle, automaton_type='dfa',
                     cache_and_non_det_check=True)
def mqtt_example():
    """Learn a Mealy model of the mock MQTT client and visualize it."""
    from aalpy.base import SUL

    class MQTT_SUL(SUL):
        """SUL that maps abstract inputs to MockMqttExample method calls."""

        def __init__(self):
            super().__init__()
            self.mqtt = MockMqttExample()

        def pre(self):
            # Start every query from the closed-connection state.
            self.mqtt.state = 'CONCLOSED'

        def post(self):
            self.mqtt.topics.clear()

        def step(self, letter):
            if letter == 'connect':
                return self.mqtt.connect()
            if letter == 'disconnect':
                return self.mqtt.disconnect()
            if letter == 'publish':
                return self.mqtt.publish(topic='test')
            if letter == 'subscribe':
                return self.mqtt.subscribe(topic='test')
            # Any other input is treated as an unsubscribe.
            return self.mqtt.unsubscribe(topic='test')

    sul = MQTT_SUL()
    input_al = ['connect', 'disconnect', 'publish', 'subscribe', 'unsubscribe']

    oracle = RandomWalkEqOracle(input_al, sul, num_steps=2000,
                                reset_after_cex=True, reset_prob=0.15)

    mealy = run_Lstar(input_al, sul, eq_oracle=oracle, automaton_type='mealy',
                      cache_and_non_det_check=True, print_level=3)

    visualize_automaton(mealy)
def learn_gearbox(visualize=False):
    """
    Learn a Mealy model of the gearbox SUL.

    :param visualize: if True, render the learned automaton
    :return: learned Mealy machine
    """
    input_al = ['press_clutch', 'release_clutch', 'put_in_reverse',
                'increase_gear', 'decrease_gear']
    sul = GearBoxSUL()

    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=2000, walk_len=15)
    model = run_Lstar(input_al, sul, oracle, automaton_type='mealy')

    if visualize:
        visualize_automaton(model, display_same_state_trans=False)
    return model
def learn_coffee_machine_mbd(visualize=False):
    """
    Learn a Mealy model of the fault-injected coffee machine (model-based
    diagnosis example).

    :param visualize: if True, render the learned automaton
    :return: learned Mealy machine
    """
    sul = FaultInjectedCoffeeMachineSUL()
    input_al = ['coin', 'button', 'coin_double_value', 'button_no_effect']

    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=5000, walk_len=20)
    model = run_Lstar(input_al, sul, oracle, automaton_type='mealy',
                      cache_and_non_det_check=False)

    if visualize:
        visualize_automaton(model, display_same_state_trans=True)
    return model
def big_input_alphabet_example(input_alphabet_size=1000, automaton_depth=4):
    """
    Small example where the input alphabet can be huge and outputs are just
    True and False (DFA).

    Args:
        input_alphabet_size: size of input alphabet
        automaton_depth: depth of alternating True/False paths in final automaton

    Returns:
        learned model
    """
    from aalpy.base import SUL
    from aalpy.learning_algs import run_Lstar
    from aalpy.oracles import RandomWMethodEqOracle

    class alternatingSUL(SUL):
        """SUL whose acceptance alternates with the number of steps taken."""

        def __init__(self):
            super().__init__()
            self.counter = 0

        def pre(self):
            self.counter = 0

        def post(self):
            pass

        def step(self, letter):
            if letter is None:
                return False
            parity = letter % 2
            # Depth is capped so the automaton stays finite.
            self.counter = min(self.counter + 1, automaton_depth)
            return (not parity) if self.counter % 2 == 1 else parity

    input_al = list(range(input_alphabet_size))
    sul = alternatingSUL()
    oracle = RandomWMethodEqOracle(input_al, sul)

    return run_Lstar(input_al, sul, oracle, 'dfa', cache_and_non_det_check=False)
def test_wmethod_oracle_with_lstar(self):
    """W-method oracle must drive L* from a 2-state hypothesis to the 6-state model."""
    real = self.generate_real_automata()
    hyp = self.generate_hypothesis()

    # Sanity-check the fixtures before learning.
    assert real.get_input_alphabet() == ["x", "y"]
    assert hyp.get_input_alphabet() == ["x", "y"]
    assert len(real.states) == 6
    assert len(hyp.states) == 2

    alphabet = real.get_input_alphabet()
    oracle = WMethodEqOracle(alphabet, MooreSUL(real), len(real.states) + 1,
                             shuffle_test_set=False)
    lstar_hyp = run_Lstar(alphabet, MooreSUL(real), oracle, "moore")

    assert len(lstar_hyp.states) == 6, \
        f"Expected {6} states got {len(lstar_hyp.states)} in lstar hypothesis"
def angluin_seminal_example():
    """
    Learn the example automaton from Angluin's seminal paper.

    :return: learned DFA
    """
    dfa = get_Angluin_dfa()
    input_al = dfa.get_input_alphabet()

    sul = DfaSUL(dfa)
    oracle = RandomWalkEqOracle(input_al, sul, 500)

    return run_Lstar(input_al, sul, oracle, automaton_type='dfa',
                     cache_and_non_det_check=True, cex_processing=None,
                     print_level=3)
def learn_crossroad(visualize=False):
    """
    Learn a (bounded) Mealy model of the crossroad SUL.

    :param visualize: if True, render the learned automaton to a dot file
    :return: learned Mealy machine
    """
    sul = CrossroadSUL()
    input_al = sul.full_alphabet

    oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=10, walk_len=30)
    # Learning rounds are capped, so the model may be a partial approximation.
    model = run_Lstar(input_al, sul, oracle, automaton_type='mealy',
                      cache_and_non_det_check=False, max_learning_rounds=10)

    if visualize:
        visualize_automaton(model, display_same_state_trans=False, file_type="dot")
    return model
def test_eq_oracles(self):
    """
    Learn the Angluin DFA with every available equivalence oracle and every
    formalism, and prove each learned model equivalent.
    """
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()
    automata_type = ['dfa', 'mealy', 'moore']
    for automata in automata_type:
        # Oracles are constructed against this SUL and later re-pointed to a
        # fresh SUL before each learning run (see oracle.sul below).
        sul = DfaSUL(angluin_example)
        random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul, 5000, reset_after_cex=True)
        state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=10, walk_len=50)
        tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=200, walk_len=30,
                                                   same_state_prob=0.3)
        w_method_eq_oracle = WMethodEqOracle(alphabet, sul,
                                             max_number_of_states=len(angluin_example.states))
        random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul,
                                                          walks_per_state=10, walk_len=50)
        bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul, 3)
        random_word_eq_oracle = RandomWordEqOracle(alphabet, sul)
        cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul)
        kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul)
        oracles = [random_walk_eq_oracle, random_word_eq_oracle, random_W_method_eq_oracle,
                   w_method_eq_oracle, kWayStateCoverageEqOracle, cache_based_eq_oracle,
                   bf_exploration_eq_oracle, tran_cov_eq_oracle, state_origin_eq_oracle]
        for oracle in oracles:
            # Fresh SUL per run so query counts/caches start clean.
            sul = DfaSUL(angluin_example)
            oracle.sul = sul
            learned_model = run_Lstar(alphabet, sul, oracle, automaton_type=automata,
                                      cache_and_non_det_check=True, cex_processing=None,
                                      print_level=0)
            is_eq = self.prove_equivalence(learned_model)
            if not is_eq:
                # Print the failing oracle/formalism pair before failing.
                print(oracle, automata)
                assert False
    assert True
def to_mdp():
    """
    Learn a Moore machine and convert it into an MDP in which some
    transitions are made probabilistic (self-loop with probability 0.2).

    NOTE(review): relies on `alphabet` and `model_sul` from an enclosing or
    module scope — confirm they are defined where this is called.

    :return: Mdp built from the learned Moore machine
    """
    eq_oracle = RandomWMethodEqOracle(alphabet, model_sul)
    learned_model = run_Lstar(alphabet, model_sul, eq_oracle, 'moore')
    # CC2640R2-no-feature-req.dot
    # {'mtu_req', 'pairing_req',} have 0.3 percent chance of looping to initial state
    moore_mdp_state_map = dict()
    initial_mdp_state = None
    # First pass: create one MDP state per Moore state, keyed by access prefix.
    for state in learned_model.states:
        mdp_state = MdpState(state.state_id, state.output)
        moore_mdp_state_map[state.prefix] = mdp_state
        # The empty prefix identifies the initial state.
        if not state.prefix:
            initial_mdp_state = mdp_state
    # moore_mdp_state_map['sink'] = MdpState('sink', 'NO_RESPONSE')
    assert initial_mdp_state
    # Second pass: copy transitions, randomizing some of them.
    for state in learned_model.states:
        mdp_state = moore_mdp_state_map[state.prefix]
        # Assumes state ids look like 's<number>' — TODO confirm.
        state_num = int(mdp_state.state_id[1:])
        for i in alphabet:
            reached_moore = state.transitions[i].prefix
            # if i in {'pairing_req', 'mtu_req'} and mdp_state.output != moore_mdp_state_map[reached_moore].output:
            # Even-numbered states get a 0.2 chance of self-looping on
            # output-changing transitions; all others stay deterministic.
            if state_num % 2 == 0 and mdp_state.output != moore_mdp_state_map[
                    reached_moore].output:
                mdp_state.transitions[i].append((mdp_state, 0.2))
                mdp_state.transitions[i].append(
                    (moore_mdp_state_map[reached_moore], 0.8))
            else:
                mdp_state.transitions[i].append(
                    (moore_mdp_state_map[reached_moore], 1.))
    mdp = Mdp(initial_mdp_state, list(moore_mdp_state_map.values()))
    return mdp
for _ in range(repeat): dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2) sul = DfaSUL(dfa) # eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40) eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=9000, reset_prob=0.09) _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False, return_data=True, automaton_type='dfa') learning_time_dfa.append(data['learning_time']) total_time_dfa.append(data['total_time']) del sul del eq_oracle del dfa mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet) sul_mealy = MealySUL(mealy)