def random_mdp_example(num_states, input_len, num_outputs, n_c=20, n_resample=1000, min_rounds=10, max_rounds=1000):
    """
    Generate a random MDP and learn it with stochastic L*.

    :param num_states: number of states in generated MDP
    :param input_len: size of input alphabet
    :param num_outputs: size of output alphabet
    :param n_c: cutoff for a state to be considered complete
    :param n_resample: resampling size
    :param min_rounds: minimum number of learning rounds
    :param max_rounds: maximum number of learning rounds
    :return: learned MDP
    """
    from aalpy.SULs import MdpSUL
    from aalpy.oracles import RandomWalkEqOracle
    from aalpy.learning_algs import run_stochastic_Lstar
    from aalpy.utils import generate_random_mdp

    # build a random target model and wrap it as a system under learning
    target_mdp, input_alphabet = generate_random_mdp(num_states, input_len, num_outputs)
    sul = MdpSUL(target_mdp)

    oracle = RandomWalkEqOracle(input_alphabet, sul=sul, num_steps=5000,
                                reset_prob=0.11, reset_after_cex=True)

    return run_stochastic_Lstar(input_alphabet, sul, oracle, n_c=n_c, n_resample=n_resample,
                                min_rounds=min_rounds, max_rounds=max_rounds)
def test_suffix_closedness(self):
    """
    Learn Angluin's example DFA with and without suffix-closed observation
    tables, for every supported automaton type, and verify that each learned
    model is equivalent to the ground truth.
    """
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()

    suffix_closedness = [True, False]
    automata_type = ['dfa', 'mealy', 'moore']

    for automata in automata_type:
        for s_closed in suffix_closedness:
            # fresh SUL and oracle per configuration so caches do not leak between runs
            sul = DfaSUL(angluin_example)
            eq_oracle = RandomWalkEqOracle(alphabet, sul, 500)

            learned_dfa = run_Lstar(alphabet, sul, eq_oracle, automaton_type=automata,
                                    suffix_closedness=s_closed, cache_and_non_det_check=True,
                                    cex_processing='rs', print_level=0)

            # fix: assert the equivalence result directly instead of the
            # 'if not is_eq: assert False' / trailing 'assert True' anti-pattern
            assert self.prove_equivalence(learned_dfa)
def generate_data_based_on_characterization_set(automaton, automaton_type='mealy'):
    """
    Learn the given automaton with L* and use the resulting characterization set
    to build a (sequence, label) training corpus.

    Sequences combine state prefixes with characterizing suffixes (optionally
    extended by single input symbols); each label is the last SUL output observed
    when executing the sequence. Both are tokenized before being returned.

    :param automaton: automaton to generate data from (DFA or Mealy machine)
    :param automaton_type: 'dfa' or 'mealy'; selects the SUL wrapper
    :return: tuple (train_seq, train_labels) of tokenized sequences and labels
    """
    from aalpy.SULs import MealySUL, DfaSUL
    from aalpy.oracles import RandomWalkEqOracle
    from aalpy.learning_algs import run_Lstar

    # automaton = load_automaton_from_file(path_to_automaton, automaton_type)
    alphabet = automaton.get_input_alphabet()

    eq_oracle = RandomWalkEqOracle(alphabet, automaton, num_steps=5000, reset_prob=0.09,
                                   reset_after_cex=True)

    sul = DfaSUL(automaton) if automaton_type == 'dfa' else MealySUL(automaton)

    # re-learn the automaton so that state prefixes and the characterization set
    # are available from the learning data
    automaton, data = run_Lstar(alphabet, sul, eq_oracle, automaton_type=automaton_type,
                                print_level=0, return_data=True, suffix_closedness=True)

    characterization_set = data['characterization set']
    prefixes = [state.prefix for state in automaton.states]

    # base corpus: every state prefix concatenated with every characterizing suffix
    sequences = [p + e for e in characterization_set for p in prefixes]
    # ... additionally extended by a single intermediate input symbol
    sequences.extend([
        p + tuple([i]) + e
        for p in prefixes
        for i in automaton.get_input_alphabet()
        for e in characterization_set
    ])
    # sequences.extend([p + e for p in sequences for e in characterization_set])

    # one further round of single-symbol extension over the corpus built so far
    for _ in range(1):
        sequences.extend([
            p + tuple([i]) + e
            for p in sequences
            for i in automaton.get_input_alphabet()
            for e in characterization_set
        ])

    # duplicates the corpus 2**3 = 8 times; presumably to enlarge/weight the
    # training data — TODO confirm this duplication is intentional
    for _ in range(3):
        sequences.extend(sequences)

    # label of a sequence = last output produced when querying the SUL with it
    labels = [sul.query(s)[-1] for s in sequences]
    sequences = [list(s) for s in sequences]

    input_al = automaton.get_input_alphabet()
    # collect every output symbol reachable from any state's output function
    output_al = {
        output
        for state in automaton.states
        for output in state.output_fun.values()
    }

    input_dict = tokenized_dict(input_al)
    out_dict = tokenized_dict(output_al)

    train_seq = [seq_to_tokens(word, input_dict) for word in sequences]
    train_labels = [seq_to_tokens(word, out_dict) for word in labels]

    return train_seq, train_labels
def test_closing_strategies(self):
    """
    Learn Angluin's example DFA with every observation-table closing strategy,
    for every supported automaton type, and verify that each learned model is
    equivalent to the ground truth.
    """
    dfa = get_Angluin_dfa()
    alphabet = dfa.get_input_alphabet()

    closing_strategies = ['shortest_first', 'longest_first', 'single']
    automata_type = ['dfa', 'mealy', 'moore']

    for automata in automata_type:
        for closing in closing_strategies:
            # fresh SUL and oracle per configuration so caches do not leak between runs
            sul = DfaSUL(dfa)
            eq_oracle = RandomWalkEqOracle(alphabet, sul, 1000)

            learned_dfa = run_Lstar(alphabet, sul, eq_oracle, automaton_type=automata,
                                    closing_strategy=closing, cache_and_non_det_check=True,
                                    cex_processing='rs', print_level=0)

            # fix: assert the equivalence result directly instead of the
            # 'if not is_eq: assert False' / trailing 'assert True' anti-pattern
            assert self.prove_equivalence(learned_dfa)
def random_mealy_example(alphabet_size, number_of_states, output_size=8):
    """
    Generate a random Mealy machine and learn it.

    :param alphabet_size: size of input alphabet
    :param number_of_states: number of states in generated Mealy machine
    :param output_size: size of the output alphabet
    :return: learned Mealy machine
    """
    alphabet = [*range(0, alphabet_size)]

    random_mealy = generate_random_mealy_machine(number_of_states, alphabet,
                                                 output_alphabet=list(range(output_size)))

    sul_mealy = MealySUL(random_mealy)

    random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, 5000)

    # fix: removed an unused StatePrefixEqOracle that was constructed but never
    # passed to the learner (dead local)
    learned_mealy = run_Lstar(alphabet, sul_mealy, random_walk_eq_oracle, automaton_type='mealy',
                              cex_processing='longest_prefix')

    return learned_mealy
def test_cex_processing(self):
    """
    Learn Angluin's example DFA with every counterexample-processing strategy,
    for every supported automaton type, and verify that each learned model is
    equivalent to the ground truth.
    """
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()

    cex_processing = [None, 'longest_prefix', 'rs']
    automata_type = ['dfa', 'mealy', 'moore']

    for automata in automata_type:
        for cex in cex_processing:
            # fresh SUL and oracle per configuration so caches do not leak between runs
            sul = DfaSUL(angluin_example)
            eq_oracle = RandomWalkEqOracle(alphabet, sul, 500)

            learned_dfa = run_Lstar(alphabet, sul, eq_oracle, automaton_type=automata,
                                    cache_and_non_det_check=True, cex_processing=cex,
                                    print_level=0)

            # fix: assert the equivalence result directly instead of the
            # 'if not is_eq: assert False' / trailing 'assert True' anti-pattern
            assert self.prove_equivalence(learned_dfa)
def test_all_configuration_combinations(self):
    """
    Exhaustively learn Angluin's example DFA under every combination of
    automaton type, closing strategy, counterexample processing, suffix
    closedness, caching, and equivalence oracle, verifying that each learned
    model is equivalent to the ground truth.
    """
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()

    automata_type = ['dfa', 'mealy', 'moore']
    closing_strategies = ['shortest_first', 'longest_first', 'single']
    cex_processing = [None, 'longest_prefix', 'rs']
    suffix_closedness = [True, False]
    caching = [True, False]

    for automata in automata_type:
        for closing in closing_strategies:
            for cex in cex_processing:
                for suffix in suffix_closedness:
                    for cache in caching:
                        # one instance of every available equivalence oracle,
                        # all initially bound to the same SUL
                        sul = DfaSUL(angluin_example)
                        random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul, 5000, reset_after_cex=True)
                        state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=10, walk_len=50)
                        tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=200, walk_len=30,
                                                                   same_state_prob=0.3)
                        w_method_eq_oracle = WMethodEqOracle(alphabet, sul,
                                                             max_number_of_states=len(angluin_example.states))
                        random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul,
                                                                          walks_per_state=10, walk_len=50)
                        bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul, 3)
                        random_word_eq_oracle = RandomWordEqOracle(alphabet, sul)
                        cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul)
                        kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul)

                        oracles = [random_walk_eq_oracle, random_word_eq_oracle, random_W_method_eq_oracle,
                                   kWayStateCoverageEqOracle, cache_based_eq_oracle, bf_exploration_eq_oracle,
                                   tran_cov_eq_oracle, w_method_eq_oracle, state_origin_eq_oracle]

                        # the cache-based oracle only makes sense when caching is enabled
                        if not cache:
                            oracles.remove(cache_based_eq_oracle)

                        for oracle in oracles:
                            # fresh SUL per learning run; rebind it on the oracle
                            # so learner and oracle query the same system
                            sul = DfaSUL(angluin_example)
                            oracle.sul = sul

                            learned_model = run_Lstar(alphabet, sul, oracle, automaton_type=automata,
                                                      closing_strategy=closing, suffix_closedness=suffix,
                                                      cache_and_non_det_check=cache, cex_processing=cex,
                                                      print_level=0)

                            is_eq = self.prove_equivalence(learned_model)
                            if not is_eq:
                                # log the failing oracle/automaton combination before failing
                                print(oracle, automata)
                                assert False

    assert True
def random_dfa_example(alphabet_size, number_of_states, num_accepting_states=1):
    """
    Generate a random DFA machine and learn it.

    :param alphabet_size: size of the input alphabet
    :param number_of_states: number of states in the generated DFA
    :param num_accepting_states: number of accepting states
    :return: DFA
    """
    assert num_accepting_states <= number_of_states

    # input alphabet: the first `alphabet_size` ASCII letters
    alphabet = list(string.ascii_letters[:26])[:alphabet_size]
    random_dfa = generate_random_dfa(number_of_states, alphabet, num_accepting_states)
    # visualize_automaton(random_dfa, path='correct')
    sul_dfa = DfaSUL(random_dfa)

    # examples of various equivalence oracles
    # NOTE: only random_walk_eq_oracle is passed to the learner below;
    # the remaining oracles are constructed purely for illustration
    random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul_dfa, 5000)
    state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul_dfa, walks_per_state=10, walk_len=50)
    tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul_dfa, num_random_walks=200, walk_len=30,
                                               same_state_prob=0.3)
    w_method_eq_oracle = WMethodEqOracle(alphabet, sul_dfa, max_number_of_states=number_of_states)
    random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul_dfa, walks_per_state=10, walk_len=50)
    bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(
        alphabet, sul_dfa, 5)
    random_word_eq_oracle = RandomWordEqOracle(alphabet, sul_dfa)
    cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul_dfa)
    user_based_eq_oracle = UserInputEqOracle(alphabet, sul_dfa)
    kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul_dfa)

    learned_dfa = run_Lstar(alphabet, sul_dfa, random_walk_eq_oracle, automaton_type='dfa',
                            cache_and_non_det_check=False, cex_processing='rs')

    # visualize_automaton(learned_dfa)
    return learned_dfa
def test_non_det(self):
    """
    Learn a benchmark ONFSM 100 times; each learned model must yield no
    counterexample under a random-walk oracle and must have the same number
    of states as the ground truth.
    """
    from aalpy.SULs import OnfsmSUL
    from aalpy.oracles import RandomWordEqOracle, RandomWalkEqOracle
    from aalpy.learning_algs import run_non_det_Lstar
    from aalpy.utils import get_benchmark_ONFSM

    onfsm = get_benchmark_ONFSM()
    alphabet = onfsm.get_input_alphabet()

    for _ in range(100):
        sul = OnfsmSUL(onfsm)
        oracle = RandomWordEqOracle(alphabet, sul, num_walks=500, min_walk_len=2, max_walk_len=5)

        learned_onfsm = run_non_det_Lstar(alphabet, sul, oracle, n_sampling=50, print_level=0)

        # independent post-hoc check with a different oracle
        eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09,
                                       reset_after_cex=True)
        cex = eq_oracle.find_cex(learned_onfsm)

        # fix: single direct assertion instead of the
        # 'if ...: assert False' / trailing 'assert True' anti-pattern
        assert not cex and len(learned_onfsm.states) == len(onfsm.states)
def mqtt_example():
    """Learn a Mealy machine of a mock MQTT broker and visualize the result."""
    from aalpy.base import SUL

    class MQTT_SUL(SUL):
        """SUL wrapper mapping abstract input letters to mock-MQTT client calls."""

        def __init__(self):
            super().__init__()
            self.mqtt = MockMqttExample()

        def pre(self):
            # every membership query starts from a closed connection
            self.mqtt.state = 'CONCLOSED'

        def post(self):
            self.mqtt.topics.clear()

        def step(self, letter):
            # dispatch the abstract input to the corresponding client action;
            # any letter that is not a known action falls through to 'unsubscribe'
            if letter == 'connect':
                return self.mqtt.connect()
            if letter == 'disconnect':
                return self.mqtt.disconnect()
            if letter == 'publish':
                return self.mqtt.publish(topic='test')
            if letter == 'subscribe':
                return self.mqtt.subscribe(topic='test')
            return self.mqtt.unsubscribe(topic='test')

    sul = MQTT_SUL()
    input_alphabet = ['connect', 'disconnect', 'publish', 'subscribe', 'unsubscribe']

    oracle = RandomWalkEqOracle(input_alphabet, sul, num_steps=2000,
                                reset_after_cex=True, reset_prob=0.15)

    mealy = run_Lstar(input_alphabet, sul, eq_oracle=oracle, automaton_type='mealy',
                      cache_and_non_det_check=True, print_level=3)

    visualize_automaton(mealy)
def benchmark_stochastic_example(example, automaton_type='smm', n_c=20, n_resample=1000, min_rounds=10,
                                 max_rounds=500, strategy='normal', cex_processing='longest_prefix',
                                 stopping_based_on_prop=None, samples_cex_strategy=None):
    """
    Learning the stochastic Mealy Machine(SMM) various benchmarking examples
    found in Chapter 7 of Martin's Tappler PhD thesis.

    :param example: One of ['first_grid', 'second_grid', 'shared_coin', 'slot_machine']
    :param automaton_type: either smm (stochastic mealy machine) or mdp (Markov decision process)
    :param n_c: cutoff for a state to be considered complete
    :param n_resample: resampling size
    :param min_rounds: minimum number of learning rounds
    :param max_rounds: maximum number of learning rounds
    :param strategy: normal, classic or chi2
    :param cex_processing: counterexample processing strategy
    :param stopping_based_on_prop: a tuple (path to properties, correct values, error bound)
    :param samples_cex_strategy: strategy to sample cex in the trace tree
    :return: learned SMM
    """
    from aalpy.SULs import MdpSUL
    from aalpy.oracles import RandomWalkEqOracle
    from aalpy.learning_algs import run_stochastic_Lstar
    from aalpy.utils import load_automaton_from_file

    # Specify the path to the dot file containing a MDP
    mdp = load_automaton_from_file(f'./DotModels/MDPs/{example}.dot', automaton_type='mdp')
    input_alphabet = mdp.get_input_alphabet()

    sul = MdpSUL(mdp)

    # fix: removed a dead RandomWordEqOracle assignment that was immediately
    # overwritten by this RandomWalkEqOracle (its import was dropped as well)
    eq_oracle = RandomWalkEqOracle(input_alphabet, sul=sul, num_steps=2000, reset_prob=0.25,
                                   reset_after_cex=True)

    learned_mdp = run_stochastic_Lstar(input_alphabet=input_alphabet, eq_oracle=eq_oracle, sul=sul, n_c=n_c,
                                       n_resample=n_resample, min_rounds=min_rounds, max_rounds=max_rounds,
                                       automaton_type=automaton_type, strategy=strategy,
                                       cex_processing=cex_processing,
                                       samples_cex_strategy=samples_cex_strategy,
                                       target_unambiguity=0.99,
                                       property_based_stopping=stopping_based_on_prop)

    return learned_mdp
def angluin_seminal_example():
    """
    Example automaton from Angluin's seminal paper.

    :return: learned DFA
    """
    dfa = get_Angluin_dfa()
    input_alphabet = dfa.get_input_alphabet()

    sul = DfaSUL(dfa)
    oracle = RandomWalkEqOracle(input_alphabet, sul, 500)

    return run_Lstar(input_alphabet, sul, oracle,
                     automaton_type='dfa', cache_and_non_det_check=True,
                     cex_processing=None, print_level=3)
def test_eq_oracles(self):
    """
    Learn Angluin's example DFA once per automaton type with every available
    equivalence oracle and check that each learned model is equivalent to the
    ground truth.
    """
    angluin_example = get_Angluin_dfa()
    alphabet = angluin_example.get_input_alphabet()

    automata_type = ['dfa', 'mealy', 'moore']

    for automata in automata_type:
        # one instance of every available equivalence oracle,
        # all initially bound to the same SUL
        sul = DfaSUL(angluin_example)
        random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul, 5000, reset_after_cex=True)
        state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=10, walk_len=50)
        tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=200, walk_len=30,
                                                   same_state_prob=0.3)
        w_method_eq_oracle = WMethodEqOracle(alphabet, sul, max_number_of_states=len(angluin_example.states))
        random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul, walks_per_state=10, walk_len=50)
        bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul, 3)
        random_word_eq_oracle = RandomWordEqOracle(alphabet, sul)
        cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul)
        kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul)

        oracles = [random_walk_eq_oracle, random_word_eq_oracle, random_W_method_eq_oracle,
                   w_method_eq_oracle, kWayStateCoverageEqOracle, cache_based_eq_oracle,
                   bf_exploration_eq_oracle, tran_cov_eq_oracle, state_origin_eq_oracle]

        for oracle in oracles:
            # fresh SUL per learning run; rebind it on the oracle so that
            # learner and oracle query the same system
            sul = DfaSUL(angluin_example)
            oracle.sul = sul

            learned_model = run_Lstar(alphabet, sul, oracle, automaton_type=automata,
                                      cache_and_non_det_check=True, cex_processing=None,
                                      print_level=0)

            is_eq = self.prove_equivalence(learned_model)
            if not is_eq:
                # log the failing oracle/automaton combination before failing
                print(oracle, automata)
                assert False

    assert True
def random_onfsm_example(num_states, input_size, output_size, n_sampling):
    """
    Generate and learn random ONFSM.

    :param num_states: number of states of the randomly generated automaton
    :param input_size: size of the input alphabet
    :param output_size: size of the output alphabet
    :param n_sampling: number of times each query will be repeated to ensure that
        all non-deterministic outputs are observed
    :return: learned ONFSM
    """
    from aalpy.SULs import OnfsmSUL
    from aalpy.utils import generate_random_ONFSM
    from aalpy.oracles import RandomWordEqOracle
    from aalpy.learning_algs import run_non_det_Lstar

    onfsm = generate_random_ONFSM(num_states=num_states, num_inputs=input_size, num_outputs=output_size)
    alphabet = onfsm.get_input_alphabet()

    sul = OnfsmSUL(onfsm)

    # fix: removed a dead RandomWalkEqOracle assignment that was immediately
    # overwritten by this RandomWordEqOracle (its import was dropped as well)
    eq_oracle = RandomWordEqOracle(alphabet, sul, num_walks=500, min_walk_len=8, max_walk_len=20)

    learned_model = run_non_det_Lstar(alphabet, sul, eq_oracle=eq_oracle,
                                      n_sampling=n_sampling, print_level=2)

    return learned_model
def faulty_coffee_machine_mdp_example(automaton_type='mdp'):
    """
    Learn the faulty coffee machine found in Chapter 5 and Chapter 7 of
    Martin Tappler's PhD thesis.

    :param automaton_type: either 'mdp' or 'smm'
    :return: learned MDP (or SMM, depending on automaton_type)
    """
    from aalpy.SULs import MdpSUL
    from aalpy.oracles import RandomWalkEqOracle
    from aalpy.learning_algs import run_stochastic_Lstar
    from aalpy.utils import get_faulty_coffee_machine_MDP

    coffee_machine = get_faulty_coffee_machine_MDP()
    inputs = coffee_machine.get_input_alphabet()
    sul = MdpSUL(coffee_machine)

    oracle = RandomWalkEqOracle(inputs, sul=sul, num_steps=500,
                                reset_prob=0.11, reset_after_cex=False)

    return run_stochastic_Lstar(inputs, sul, automaton_type=automaton_type,
                                eq_oracle=oracle, n_c=20, n_resample=100,
                                min_rounds=3, max_rounds=50, print_level=3,
                                cex_processing='longest_prefix',
                                samples_cex_strategy='bfs')
def accuracy_test():
    """
    Compare adversarial-example rates of random versus coverage-guided test
    generation on a GRU classifier trained to mimic a ground-truth DFA.

    An RNN is trained on data sampled from the automaton, an L* approximation of
    the network is extracted, and then both random and coverage-guided test cases
    are used to find disagreements between the network and the ground truth.
    The disagreement percentages are printed for both strategies.
    """
    ground_truth_model = load_automaton_from_file(
        'TrainingDataAndAutomata/bp_depth4.dot', automaton_type='dfa')
    input_al = ground_truth_model.get_input_alphabet()
    # binary classification: accept (1) / reject (0)
    output_al = [1, 0]

    train_seq, train_labels = generate_data_from_automaton(ground_truth_model, input_al,
                                                           num_examples=10000,
                                                           lens=(1, 2, 3, 5, 8, 10, 12, 15, 20, 25, 30))

    x_train, y_train, x_test, y_test = split_train_validation(train_seq, train_labels, 0.8, uniform=True)

    # Train all neural networks with same parameters, this can be configured to train with different parameters
    rnn = RNNClassifier(input_al, output_dim=len(output_al), num_layers=2, hidden_dim=50,
                        x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,
                        batch_size=32, nn_type='GRU')
    rnn.train(epochs=150, stop_acc=1.0, stop_epochs=2, verbose=1)

    # two SULs: the trained network and the ground-truth automaton
    sul = RnnBinarySUL(rnn)
    gt_sul = DfaSUL(ground_truth_model)

    # only random_word_eq_oracle is used for learning below; the other two
    # oracles are constructed but not passed to the learner
    random_walk_eq_oracle = RandomWalkEqOracle(input_al, sul, num_steps=10000, reset_prob=0.05)
    random_word_eq_oracle = RandomWordEqOracle(input_al, sul, min_walk_len=5, max_walk_len=25, num_walks=1000)
    random_w_eq_oracle = RandomWMethodEqOracle(input_al, sul, walks_per_state=200, walk_len=25)

    # extract an (approximate) DFA from the network; bounded to 5 rounds
    learned_model = run_Lstar(input_al, sul, random_word_eq_oracle,
                              automaton_type='dfa', max_learning_rounds=5)

    from random import choice, randint

    random_tc = []
    coverage_guided_tc = []
    num_tc = 1000

    for _ in range(num_tc):
        # purely random test case of length 10..25
        random_tc.append(
            tuple(choice(input_al) for _ in range(randint(10, 25))))

        # coverage-guided test case: state prefix + random middle + characterizing suffix
        prefix = choice(learned_model.states).prefix
        middle = tuple(choice(input_al) for _ in range(20))
        suffix = choice(learned_model.characterization_set)
        coverage_guided_tc.append(prefix + middle + suffix)

    # count disagreements (adversarial examples) for the random test cases
    num_adv_random = 0
    for tc in random_tc:
        correct = gt_sul.query(tc)
        trained = sul.query(tc)
        if correct != trained:
            num_adv_random += 1

    # count disagreements for the coverage-guided test cases
    num_adv_guided = 0
    for tc in coverage_guided_tc:
        correct = gt_sul.query(tc)
        trained = sul.query(tc)
        if correct != trained:
            num_adv_guided += 1

    print(f'Random sampling: {round((num_adv_random/num_tc)*100,2)}')
    print(f'Guided sampling: {round((num_adv_guided/num_tc)*100,2)}')
def multi_client_mqtt_example():
    """
    Example from the paper 'Learning Abstracted Non-deterministic Finite State Machines'.
    https://link.springer.com/chapter/10.1007/978-3-030-64881-7_4

    Returns:

        learned automaton
    """
    import random

    from aalpy.base import SUL
    from aalpy.oracles import RandomWalkEqOracle
    from aalpy.learning_algs import run_abstracted_ONFSM_Lstar
    from aalpy.SULs import MealySUL
    from aalpy.utils import load_automaton_from_file

    class Multi_Client_MQTT_Mapper(SUL):
        """Maps abstract single-client inputs onto a concrete five-client MQTT
        Mealy machine by picking a random client per step, and abstracts the
        concrete outputs back into a small output alphabet."""

        def __init__(self):
            super().__init__()

            five_clients_mqtt_mealy = load_automaton_from_file('DotModels/five_clients_mqtt_abstracted_onfsm.dot',
                                                               automaton_type='mealy')
            self.five_client_mqtt = MealySUL(five_clients_mqtt_mealy)
            # bookkeeping of which clients are currently connected/subscribed
            self.connected_clients = set()
            self.subscribed_clients = set()

            self.clients = ('c0', 'c1', 'c2', 'c3', 'c4')

        def get_input_alphabet(self):
            return ['connect', 'disconnect', 'subscribe', 'unsubscribe', 'publish']

        def pre(self):
            self.five_client_mqtt.pre()

        def post(self):
            self.five_client_mqtt.post()
            # reset bookkeeping together with the concrete system
            self.connected_clients = set()
            self.subscribed_clients = set()

        def step(self, letter):
            # non-determinism source: a random client executes the abstract input
            client = random.choice(self.clients)
            inp = client + '_' + letter
            concrete_output = self.five_client_mqtt.step(inp)
            all_out = ''

            if letter == 'connect':
                if client not in self.connected_clients:
                    self.connected_clients.add(client)
                # a second 'connect' of an already connected client closes it
                elif client in self.connected_clients:
                    self.connected_clients.remove(client)
                    if client in self.subscribed_clients:
                        self.subscribed_clients.remove(client)
                    if len(self.subscribed_clients) == 0:
                        all_out = '_UNSUB_ALL'
            elif letter == 'subscribe' and client in self.connected_clients:
                self.subscribed_clients.add(client)
            elif letter == 'disconnect' and client in self.connected_clients:
                self.connected_clients.remove(client)
                if client in self.subscribed_clients:
                    self.subscribed_clients.remove(client)
                if len(self.subscribed_clients) == 0:
                    all_out = '_UNSUB_ALL'
            elif letter == 'unsubscribe' and client in self.connected_clients:
                if client in self.subscribed_clients:
                    self.subscribed_clients.remove(client)
                if len(self.subscribed_clients) == 0:
                    all_out = '_ALL'

            # concrete outputs are '__'-separated, each prefixed by a 3-char
            # client id (e.g. 'c0_') which is stripped here
            concrete_outputs = concrete_output.split('__')
            abstract_outputs = set([e[3:] for e in concrete_outputs])

            if 'Empty' in abstract_outputs:
                abstract_outputs.remove('Empty')

            if abstract_outputs == {'CONCLOSED'}:
                if len(self.connected_clients) == 0:
                    all_out = '_ALL'
                return 'CONCLOSED' + all_out
            else:
                if 'CONCLOSED' in abstract_outputs:
                    abstract_outputs.remove('CONCLOSED')
                # sort then deduplicate tokens for a canonical abstract output
                abstract_outputs = sorted(list(abstract_outputs))
                output = '_'.join(abstract_outputs)
                return '_'.join(set(output.split('_'))) + all_out

    sul = Multi_Client_MQTT_Mapper()
    alphabet = sul.get_input_alphabet()

    eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=5000, reset_prob=0.09, reset_after_cex=True)

    # outputs on the left are considered equivalent to the abstract output on the right
    abstraction_mapping = {
        'CONCLOSED': 'CONCLOSED',
        'CONCLOSED_UNSUB_ALL': 'CONCLOSED',
        'CONCLOSED_ALL': 'CONCLOSED',
        'UNSUBACK': 'UNSUBACK',
        'UNSUBACK_ALL': 'UNSUBACK'
    }

    learned_onfsm = run_abstracted_ONFSM_Lstar(alphabet, sul, eq_oracle,
                                               abstraction_mapping=abstraction_mapping,
                                               n_sampling=200, print_level=3)

    return learned_onfsm
# NOTE(review): this fragment is the interior of a benchmarking routine whose
# definition is outside this view; names such as `states`, `repeat`, `alphabet`,
# `cex_processing` and `learning_time_dfa` come from the enclosing (unseen) scope.
total_time_dfa = []
total_time_mealy = []
total_time_moore = []

# record the state count of this benchmark configuration
states.append(num_states)

for _ in range(repeat):
    # half of the generated states are accepting
    dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2)
    sul = DfaSUL(dfa)
    # eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40)
    eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=9000, reset_prob=0.09)

    _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing,
                        cache_and_non_det_check=False, return_data=True, automaton_type='dfa')

    learning_time_dfa.append(data['learning_time'])
    total_time_dfa.append(data['total_time'])

    # drop SUL and oracle before the next repetition
    del sul
    del eq_oracle
def test_learning_based_on_accuracy_based_stopping(self):
    """
    Run stochastic L* with property-based stopping over every combination of
    automaton type, similarity strategy, counterexample processing and
    counterexample sampling, and verify that the model-checking error of each
    learned model stays within the configured bound.
    """
    example = 'first_grid'
    # keep the ground truth under its own name; it must not be overwritten
    ground_truth_mdp = load_automaton_from_file(f'../DotModels/MDPs/{example}.dot', automaton_type='mdp')
    min_rounds = 10
    max_rounds = 500

    from aalpy.automata import StochasticMealyMachine
    from aalpy.utils import model_check_experiment, get_properties_file, \
        get_correct_prop_values
    from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion

    aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
    aalpy.paths.path_to_properties = "../Benchmarking/prism_eval_props/"

    # (properties file, correct property values, allowed error bound)
    stopping_based_on_prop = (get_properties_file(example), get_correct_prop_values(example), 0.02)

    input_alphabet = ground_truth_mdp.get_input_alphabet()

    automaton_type = ['mdp', 'smm']
    similarity_strategy = ['classic', 'normal', 'chi2']
    cex_processing = [None, 'longest_prefix']
    samples_cex_strategy = [None, 'bfs', 'random:200:0.3']

    for aut_type in automaton_type:
        for strategy in similarity_strategy:
            for cex in cex_processing:
                for sample_cex in samples_cex_strategy:
                    sul = StochasticMealySUL(ground_truth_mdp) if aut_type == 'smm' else MdpSUL(ground_truth_mdp)
                    eq_oracle = RandomWalkEqOracle(input_alphabet, sul=sul, num_steps=200,
                                                   reset_prob=0.25, reset_after_cex=True)

                    learned_model = run_stochastic_Lstar(
                        input_alphabet=input_alphabet, eq_oracle=eq_oracle, sul=sul, n_c=20,
                        n_resample=1000, min_rounds=min_rounds, max_rounds=max_rounds,
                        automaton_type=aut_type, strategy=strategy, cex_processing=cex,
                        samples_cex_strategy=sample_cex, target_unambiguity=0.99,
                        property_based_stopping=stopping_based_on_prop, print_level=0)

                    # bug fix: the original assigned the converted model back to 'mdp',
                    # clobbering the ground truth — every later iteration then learned
                    # from the previously learned model instead of the real MDP
                    if isinstance(learned_model, StochasticMealyMachine):
                        model_to_check = smm_to_mdp_conversion(learned_model)
                    else:
                        model_to_check = learned_model

                    results, diff = model_check_experiment(get_properties_file(example),
                                                           get_correct_prop_values(example),
                                                           model_to_check)

                    # every property's deviation must stay within the error bound
                    for d in diff.values():
                        assert d <= stopping_based_on_prop[2]