Example #1
File: Examples.py Project: DES-Lab/AALpy
def custom_stochastic_example(stochastic_machine, learning_type='smm', min_rounds=10, max_rounds=500):
    """
    Learning a custom stochastic Mealy machine (SMM) or MDP.
    :param stochastic_machine: stochastic Mealy machine or MDP to learn
    :param learning_type: 'smm' or 'mdp'
    :param min_rounds: minimum number of learning rounds
    :param max_rounds: maximum number of learning rounds
    :return: learned model
    """
    from aalpy.SULs import MdpSUL, StochasticMealySUL
    from aalpy.automata import Mdp
    from aalpy.oracles import RandomWordEqOracle
    from aalpy.learning_algs import run_stochastic_Lstar

    input_al = stochastic_machine.get_input_alphabet()

    if isinstance(stochastic_machine, Mdp):
        sul = MdpSUL(stochastic_machine)
    else:
        sul = StochasticMealySUL(stochastic_machine)

    eq_oracle = RandomWordEqOracle(alphabet=input_al, sul=sul, num_walks=1000, min_walk_len=10, max_walk_len=30,
                                   reset_after_cex=True)

    learned_model = run_stochastic_Lstar(input_al, sul, eq_oracle,
                                         automaton_type=learning_type,
                                         min_rounds=min_rounds,
                                         max_rounds=max_rounds,
                                         print_level=2)

    return learned_model
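A minimal usage sketch (the coffee-machine MDP getter is the one imported in Example #7 below; the round limits are illustrative):

from aalpy.utils import get_weird_coffee_machine_MDP

machine = get_weird_coffee_machine_MDP()
learned = custom_stochastic_example(machine, learning_type='smm', min_rounds=10, max_rounds=200)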
Example #2
File: Examples.py Project: DES-Lab/AALpy
def abstracted_onfsm_example():
    """
    Learning an abstracted ONFSM. The original ONFSM has 9 states.
    The learned abstracted ONFSM only has 3 states.

    :return: learned abstracted ONFSM
    """
    from aalpy.SULs import OnfsmSUL
    from aalpy.oracles import RandomWordEqOracle
    from aalpy.learning_algs import run_abstracted_ONFSM_Lstar
    from aalpy.utils import get_ONFSM

    onfsm = get_ONFSM()

    alphabet = onfsm.get_input_alphabet()

    sul = OnfsmSUL(onfsm)
    eq_oracle = RandomWordEqOracle(alphabet, sul, num_walks=500, min_walk_len=4, max_walk_len=8, reset_after_cex=True)

    abstraction_mapping = {0: 0, 'O': 0}

    learned_onfsm = run_abstracted_ONFSM_Lstar(alphabet, sul, eq_oracle=eq_oracle,
                                               abstraction_mapping=abstraction_mapping,
                                               n_sampling=50, print_level=3)

    return learned_onfsm
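The abstraction_mapping sends each concrete output to an abstract class; above, both 0 and 'O' collapse into class 0, which is what shrinks the 9-state ONFSM to 3 learned states. A sketch of a two-class mapping (the extra entries are illustrative, not from AALpy):

# concrete outputs on the left, abstract classes on the right
abstraction_mapping = {
    0: 0, 'O': 0,      # these outputs are treated as equivalent (class 0)
    1: 1, 'NOK': 1,    # these outputs are treated as equivalent (class 1)
}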
Example #3
    def test_all_configuration_combinations(self):
        angluin_example = get_Angluin_dfa()

        alphabet = angluin_example.get_input_alphabet()

        automata_type = ['dfa', 'mealy', 'moore']
        closing_strategies = ['shortest_first', 'longest_first', 'single']
        cex_processing = [None, 'longest_prefix', 'rs']
        suffix_closedness = [True, False]
        caching = [True, False]

        for automata in automata_type:
            for closing in closing_strategies:
                for cex in cex_processing:
                    for suffix in suffix_closedness:
                        for cache in caching:
                            sul = DfaSUL(angluin_example)

                            random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul, 5000, reset_after_cex=True)
                            state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=10, walk_len=50)
                            tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=200, walk_len=30,
                                                                       same_state_prob=0.3)
                            w_method_eq_oracle = WMethodEqOracle(alphabet, sul,
                                                                 max_number_of_states=len(angluin_example.states))
                            random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul,
                                                                              walks_per_state=10, walk_len=50)
                            bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul, 3)
                            random_word_eq_oracle = RandomWordEqOracle(alphabet, sul)
                            cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul)
                            kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul)

                            oracles = [random_walk_eq_oracle, random_word_eq_oracle, random_W_method_eq_oracle,
                                       kWayStateCoverageEqOracle, cache_based_eq_oracle, bf_exploration_eq_oracle,
                                       tran_cov_eq_oracle, w_method_eq_oracle, state_origin_eq_oracle]

                            if not cache:
                                oracles.remove(cache_based_eq_oracle)

                            for oracle in oracles:
                                sul = DfaSUL(angluin_example)
                                oracle.sul = sul

                                learned_model = run_Lstar(alphabet, sul, oracle, automaton_type=automata,
                                                          closing_strategy=closing, suffix_closedness=suffix,
                                                          cache_and_non_det_check=cache, cex_processing=cex,
                                                          print_level=0)

                                is_eq = self.prove_equivalence(learned_model)
                                if not is_eq:
                                    print(oracle, automata)
                                    assert False

        assert True
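The five nested loops enumerate the full Cartesian product of configurations; an equivalent, flatter sketch using itertools.product:

from itertools import product

for automata, closing, cex, suffix, cache in product(automata_type, closing_strategies,
                                                     cex_processing, suffix_closedness, caching):
    ...  # build the SUL and oracles, then call run_Lstar exactly as above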
Example #4
File: Examples.py Project: DES-Lab/AALpy
def random_dfa_example(alphabet_size, number_of_states, num_accepting_states=1):
    """
    Generate a random DFA and learn it.
    :param alphabet_size: size of the input alphabet
    :param number_of_states: number of states in the generated DFA
    :param num_accepting_states: number of accepting states
    :return: DFA
    """
    import string
    from aalpy.SULs import DfaSUL
    from aalpy.learning_algs import run_Lstar
    from aalpy.oracles import StatePrefixEqOracle, TransitionFocusOracle, WMethodEqOracle, \
        RandomWalkEqOracle, RandomWMethodEqOracle, BreadthFirstExplorationEqOracle, RandomWordEqOracle, \
        CacheBasedEqOracle, UserInputEqOracle, KWayStateCoverageEqOracle, KWayTransitionCoverageEqOracle, PacOracle
    from aalpy.utils import generate_random_dfa

    assert num_accepting_states <= number_of_states

    alphabet = list(string.ascii_letters[:26])[:alphabet_size]
    random_dfa = generate_random_dfa(number_of_states, alphabet, num_accepting_states)
    # visualize_automaton(random_dfa, path='correct')
    sul_dfa = DfaSUL(random_dfa)

    # examples of various equivalence oracles

    random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul_dfa, 5000)
    state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul_dfa, walks_per_state=10, walk_len=50)
    tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul_dfa, num_random_walks=200, walk_len=30,
                                               same_state_prob=0.3)
    w_method_eq_oracle = WMethodEqOracle(alphabet, sul_dfa, max_number_of_states=number_of_states)
    pac_oracle = PacOracle(alphabet, sul_dfa)
    random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul_dfa, walks_per_state=10, walk_len=50)
    bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul_dfa, 5)
    random_word_eq_oracle = RandomWordEqOracle(alphabet, sul_dfa)
    cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul_dfa)
    user_based_eq_oracle = UserInputEqOracle(alphabet, sul_dfa)
    kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul_dfa)
    kWayTransitionCoverageEqOracle = KWayTransitionCoverageEqOracle(alphabet, sul_dfa)
    learned_dfa = run_Lstar(alphabet, sul_dfa, random_W_method_eq_oracle, automaton_type='dfa',
                            cache_and_non_det_check=True, cex_processing='rs')

    # visualize_automaton(learned_dfa)
    return learned_dfa
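A hypothetical invocation (the sizes are illustrative):

dfa = random_dfa_example(alphabet_size=4, number_of_states=8, num_accepting_states=2)
print(dfa)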
Example #5
File: Examples.py Project: DES-Lab/AALpy
def benchmark_stochastic_example(example, automaton_type='smm', n_c=20, n_resample=1000, min_rounds=10, max_rounds=500,
                                 strategy='normal', cex_processing='longest_prefix', stopping_based_on_prop=None,
                                 samples_cex_strategy=None):
    """
    Learning a stochastic Mealy machine (SMM) on various benchmark examples
    found in Chapter 7 of Martin Tappler's PhD thesis.
    :param n_c: cutoff for a state to be considered complete
    :param automaton_type: either smm (stochastic mealy machine) or mdp (Markov decision process)
    :param n_resample: resampling size
    :param example: One of ['first_grid', 'second_grid', 'shared_coin', 'slot_machine']
    :param min_rounds: minimum number of learning rounds
    :param max_rounds: maximum number of learning rounds
    :param strategy: normal, classic or chi2
    :param cex_processing: counterexample processing strategy
    :param stopping_based_on_prop: a tuple (path to properties, correct values, error bound)
    :param samples_cex_strategy: strategy to sample cex in the trace tree
    :return: learned SMM

    """
    from aalpy.SULs import MdpSUL
    from aalpy.oracles import RandomWalkEqOracle, RandomWordEqOracle
    from aalpy.learning_algs import run_stochastic_Lstar
    from aalpy.utils import load_automaton_from_file

    # Specify the path to the dot file containing an MDP
    mdp = load_automaton_from_file(f'./DotModels/MDPs/{example}.dot', automaton_type='mdp')
    input_alphabet = mdp.get_input_alphabet()

    sul = MdpSUL(mdp)
    # Note: the second assignment below overrides the RandomWordEqOracle; keep
    # whichever equivalence oracle you want to use.
    eq_oracle = RandomWordEqOracle(input_alphabet, sul, num_walks=100, min_walk_len=5, max_walk_len=15,
                                   reset_after_cex=True)
    eq_oracle = RandomWalkEqOracle(input_alphabet, sul=sul, num_steps=2000, reset_prob=0.25,
                                   reset_after_cex=True)

    learned_mdp = run_stochastic_Lstar(input_alphabet=input_alphabet, eq_oracle=eq_oracle, sul=sul, n_c=n_c,
                                       n_resample=n_resample, min_rounds=min_rounds, max_rounds=max_rounds,
                                       automaton_type=automaton_type, strategy=strategy, cex_processing=cex_processing,
                                       samples_cex_strategy=samples_cex_strategy, target_unambiguity=0.99,
                                       property_based_stopping=stopping_based_on_prop)

    return learned_mdp
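A hypothetical invocation, assuming ./DotModels/MDPs/first_grid.dot is available locally:

model = benchmark_stochastic_example('first_grid', automaton_type='smm',
                                     min_rounds=10, max_rounds=150)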
Example #6
File: Examples.py Project: DES-Lab/AALpy
def onfsm_mealy_paper_example():
    """
    Learning an ONFSM presented in 'Learning Finite State Models of Observable Nondeterministic Systems in a Testing
    Context'.
    :return: learned ONFSM
    """

    from aalpy.SULs import OnfsmSUL
    from aalpy.oracles import RandomWordEqOracle
    from aalpy.learning_algs import run_non_det_Lstar
    from aalpy.utils import get_benchmark_ONFSM

    onfsm = get_benchmark_ONFSM()
    alphabet = onfsm.get_input_alphabet()

    sul = OnfsmSUL(onfsm)
    eq_oracle = RandomWordEqOracle(alphabet, sul, num_walks=500, min_walk_len=5, max_walk_len=12)

    learned_onfsm = run_non_det_Lstar(alphabet, sul, eq_oracle, n_sampling=1, print_level=2)

    return learned_onfsm
Example #7
File: Examples.py Project: DES-Lab/AALpy
def weird_coffee_machine_mdp_example():
    """
    Learning the faulty coffee machine that can be found in Chapter 5 and Chapter 7 of Martin Tappler's PhD thesis.
    :return: learned model (an SMM here, since automaton_type='smm')
    """
    from aalpy.SULs import MdpSUL
    from aalpy.oracles import RandomWordEqOracle
    from aalpy.learning_algs import run_stochastic_Lstar
    from aalpy.utils import get_weird_coffee_machine_MDP

    mdp = get_weird_coffee_machine_MDP()
    input_alphabet = mdp.get_input_alphabet()
    sul = MdpSUL(mdp)

    eq_oracle = RandomWordEqOracle(input_alphabet, sul=sul, num_walks=2000, min_walk_len=4, max_walk_len=10,
                                   reset_after_cex=True)

    learned_mdp = run_stochastic_Lstar(input_alphabet, sul, eq_oracle, n_c=20, n_resample=1000, min_rounds=10,
                                       max_rounds=500, strategy='normal', cex_processing=None,
                                       samples_cex_strategy=None, automaton_type='smm')

    return learned_mdp
Example #8
    def test_eq_oracles(self):
        angluin_example = get_Angluin_dfa()

        alphabet = angluin_example.get_input_alphabet()

        automata_type = ['dfa', 'mealy', 'moore']

        for automata in automata_type:
            sul = DfaSUL(angluin_example)

            random_walk_eq_oracle = RandomWalkEqOracle(alphabet, sul, 5000, reset_after_cex=True)
            state_origin_eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=10, walk_len=50)
            tran_cov_eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=200, walk_len=30,
                                                       same_state_prob=0.3)
            w_method_eq_oracle = WMethodEqOracle(alphabet, sul, max_number_of_states=len(angluin_example.states))
            random_W_method_eq_oracle = RandomWMethodEqOracle(alphabet, sul, walks_per_state=10, walk_len=50)
            bf_exploration_eq_oracle = BreadthFirstExplorationEqOracle(alphabet, sul, 3)
            random_word_eq_oracle = RandomWordEqOracle(alphabet, sul)
            cache_based_eq_oracle = CacheBasedEqOracle(alphabet, sul)
            kWayStateCoverageEqOracle = KWayStateCoverageEqOracle(alphabet, sul)

            oracles = [random_walk_eq_oracle, random_word_eq_oracle, random_W_method_eq_oracle, w_method_eq_oracle,
                       kWayStateCoverageEqOracle, cache_based_eq_oracle, bf_exploration_eq_oracle, tran_cov_eq_oracle,
                       state_origin_eq_oracle]

            for oracle in oracles:
                sul = DfaSUL(angluin_example)
                oracle.sul = sul

                learned_model = run_Lstar(alphabet, sul, oracle, automaton_type=automata,
                                          cache_and_non_det_check=True, cex_processing=None, print_level=0)

                is_eq = self.prove_equivalence(learned_model)
                if not is_eq:
                    print(oracle, automata)
                    assert False

        assert True
Example #9
    def test_non_det(self):

        from aalpy.SULs import OnfsmSUL
        from aalpy.oracles import RandomWordEqOracle, RandomWalkEqOracle
        from aalpy.learning_algs import run_non_det_Lstar
        from aalpy.utils import get_benchmark_ONFSM

        onfsm = get_benchmark_ONFSM()
        alphabet = onfsm.get_input_alphabet()

        for _ in range(100):
            sul = OnfsmSUL(onfsm)

            oracle = RandomWordEqOracle(alphabet,
                                        sul,
                                        num_walks=500,
                                        min_walk_len=2,
                                        max_walk_len=5)

            learned_onfsm = run_non_det_Lstar(alphabet,
                                              sul,
                                              oracle,
                                              n_sampling=50,
                                              print_level=0)

            eq_oracle = RandomWalkEqOracle(alphabet,
                                           sul,
                                           num_steps=10000,
                                           reset_prob=0.09,
                                           reset_after_cex=True)

            cex = eq_oracle.find_cex(learned_onfsm)

            if cex or len(learned_onfsm.states) != len(onfsm.states):
                assert False
        assert True
Example #10
File: Examples.py Project: DES-Lab/AALpy
def random_onfsm_example(num_states, input_size, output_size, n_sampling):
    """
    Generate and learn random ONFSM.
    :param num_states: number of states of the randomly generated automaton
    :param input_size: size of the input alphabet
    :param output_size: size of the output alphabet
    :param n_sampling: number of times each query will be repeated to ensure that all non-deterministic outputs are
    observed
    :return: learned ONFSM
    """
    from aalpy.SULs import OnfsmSUL
    from aalpy.utils import generate_random_ONFSM
    from aalpy.oracles import RandomWalkEqOracle, RandomWordEqOracle
    from aalpy.learning_algs import run_non_det_Lstar

    onfsm = generate_random_ONFSM(num_states=num_states, num_inputs=input_size, num_outputs=output_size)
    alphabet = onfsm.get_input_alphabet()

    sul = OnfsmSUL(onfsm)
    # Note: the RandomWordEqOracle below overrides the RandomWalkEqOracle; keep
    # whichever equivalence oracle you want to use.
    eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=500, reset_prob=0.15, reset_after_cex=True)
    eq_oracle = RandomWordEqOracle(alphabet, sul, num_walks=500, min_walk_len=8, max_walk_len=20)

    learned_model = run_non_det_Lstar(alphabet, sul, eq_oracle=eq_oracle, n_sampling=n_sampling, print_level=2)
    return learned_model
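A hypothetical invocation (the sizes are illustrative):

model = random_onfsm_example(num_states=5, input_size=2, output_size=3, n_sampling=25)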
Example #11
def run_comparison(example, train=True, num_layers=2, hidden_dim=50, rnn_class=GRUNetwork,
                   insufficient_testing=False, verbose=False):
    rnn, alphabet, train_set = train_or_load_rnn(example, num_layers=num_layers, hidden_dim=hidden_dim,
                                                 rnn_class=rnn_class, train=train)

    # initial examples for Weiss et al.
    all_words = sorted(list(train_set.keys()), key=lambda x: len(x))
    pos = next((w for w in all_words if rnn.classify_word(w) is True), None)
    neg = next((w for w in all_words if rnn.classify_word(w) is False), None)
    starting_examples = [w for w in [pos, neg] if w is not None]

    # Extract Automaton Using White-Box eq. query
    rnn.renew()
    if verbose:
        print('---------------------------------WHITE BOX EXTRACTION--------------------------------------------------')
    else:
        blockPrint()
    start_white_box = time.time()
    dfa_weiss = extract(rnn, time_limit=500, initial_split_depth=10, starting_examples=starting_examples)
    time_white_box = time.time() - start_white_box
    # Make sure that internal states are back to initial
    rnn.renew()

    if verbose:
        print('---------------------------------BLACK BOX EXTRACTION--------------------------------------------------')
    sul = RNN_BinarySUL_for_Weiss_Framework(rnn)

    alphabet = list(alphabet)

    # define the equivalence oracle
    if insufficient_testing:
        eq_oracle = RandomWordEqOracle(alphabet, sul, num_walks=100, min_walk_len=3, max_walk_len=12)
    else:
        eq_oracle = RandomWMethodEqOracle(alphabet, sul, walks_per_state=1000, walk_len=25)
        if 'tomita' not in example:
            eq_oracle = TransitionFocusOracle(alphabet, sul, num_random_walks=1000, walk_len=20)

    start_black_box = time.time()
    aalpy_dfa = run_Lstar(alphabet=alphabet, sul=sul, eq_oracle=eq_oracle, automaton_type='dfa', max_learning_rounds=10,
                          print_level=2, cache_and_non_det_check=False, cex_processing='rs')
    time_black_box = time.time() - start_black_box

    enablePrint()
    if insufficient_testing:
        if len(aalpy_dfa.states) == len(dfa_weiss.Q):
            translated_weiss_2_aalpy = Weiss_to_AALpy_DFA_format(dfa_weiss)
            sul = DfaSUL(translated_weiss_2_aalpy)
            eq_oracle = RandomWMethodEqOracle(alphabet, sul, walks_per_state=1000, walk_len=10)

            cex = eq_oracle.find_cex(aalpy_dfa)
            if not cex:
                print(
                    '-------------------------WHITE-BOX vs. BLACK-BOX WITH INSUFFICIENT TESTING-------------------------')
                print('White-box and Black-box technique extracted the same automaton.')
                print(f'White-box time: {round(time_white_box, 2)} seconds.')
                print(f'Black-box time: {round(time_black_box, 2)} seconds.')
            else:
                verify_cex(aalpy_dfa, translated_weiss_2_aalpy, rnn, [cex])
        return

    if len(aalpy_dfa.states) != len(dfa_weiss.Q):
        print('---------------------------------WHITE vs. BLACK BOX EXTRACTION----------------------------------------')
        nn_props = F'{"GRU" if rnn_class == GRUNetwork else "LSTM"}_layers_{num_layers}_dim_{hidden_dim}'
        print(f'Example       : {example}')
        print(f'Configuration : {nn_props}')
        print(f"Number of states\n  "
              f"White-box extraction: {len(dfa_weiss.Q)}\n  "
              f"Black-box extraction: {len(aalpy_dfa.states)}")

        translated_weiss_2_aalpy = Weiss_to_AALpy_DFA_format(dfa_weiss)

        sul = DfaSUL(translated_weiss_2_aalpy)
        eq_oracle = RandomWMethodEqOracle(alphabet, sul, walks_per_state=10000, walk_len=20)
        if 'tomita' not in example:
            eq_oracle = TransitionFocusOracle(alphabet, sul)

        cex_set = []
        for _ in range(10):
            cex = eq_oracle.find_cex(aalpy_dfa)
            if cex and cex not in cex_set:
                cex_set.append(cex)

        cex_set.sort(key=len)
        # verify that the counterexamples are not spurious and find out which model is the correct one
        real_cex = verify_cex(aalpy_dfa, translated_weiss_2_aalpy, rnn, cex_set)
        if not real_cex:
            print('Spurious CEX')
            assert False
        #print('Few Counterexamples')
        #print('  ', cex_set[:3])
    else:
        print('Size of both models: ', len(aalpy_dfa.states))
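A hypothetical invocation ('tomita_3' is an assumed example name that train_or_load_rnn must be able to resolve):

run_comparison('tomita_3', train=False, rnn_class=GRUNetwork, verbose=True)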
Example #12
def accuracy_test():
    ground_truth_model = load_automaton_from_file(
        'TrainingDataAndAutomata/bp_depth4.dot', automaton_type='dfa')
    input_al = ground_truth_model.get_input_alphabet()
    output_al = [1, 0]

    train_seq, train_labels = generate_data_from_automaton(ground_truth_model,
                                                           input_al,
                                                           num_examples=10000,
                                                           lens=(1, 2, 3, 5, 8,
                                                                 10, 12, 15,
                                                                 20, 25, 30))

    x_train, y_train, x_test, y_test = split_train_validation(train_seq,
                                                              train_labels,
                                                              0.8,
                                                              uniform=True)

    # Train all neural networks with the same parameters; this can be configured to train with different parameters
    rnn = RNNClassifier(input_al,
                        output_dim=len(output_al),
                        num_layers=2,
                        hidden_dim=50,
                        x_train=x_train,
                        y_train=y_train,
                        x_test=x_test,
                        y_test=y_test,
                        batch_size=32,
                        nn_type='GRU')

    rnn.train(epochs=150, stop_acc=1.0, stop_epochs=2, verbose=1)

    sul = RnnBinarySUL(rnn)
    gt_sul = DfaSUL(ground_truth_model)

    random_walk_eq_oracle = RandomWalkEqOracle(input_al,
                                               sul,
                                               num_steps=10000,
                                               reset_prob=0.05)
    random_word_eq_oracle = RandomWordEqOracle(input_al,
                                               sul,
                                               min_walk_len=5,
                                               max_walk_len=25,
                                               num_walks=1000)
    random_w_eq_oracle = RandomWMethodEqOracle(input_al,
                                               sul,
                                               walks_per_state=200,
                                               walk_len=25)

    learned_model = run_Lstar(input_al,
                              sul,
                              random_word_eq_oracle,
                              automaton_type='dfa',
                              max_learning_rounds=5)

    from random import choice, randint
    random_tc = []
    coverage_guided_tc = []
    num_tc = 1000
    for _ in range(num_tc):
        random_tc.append(
            tuple(choice(input_al) for _ in range(randint(10, 25))))

        prefix = choice(learned_model.states).prefix
        middle = tuple(choice(input_al) for _ in range(20))
        suffix = choice(learned_model.characterization_set)
        coverage_guided_tc.append(prefix + middle + suffix)

    num_adv_random = 0
    for tc in random_tc:
        correct = gt_sul.query(tc)
        trained = sul.query(tc)
        if correct != trained:
            num_adv_random += 1

    num_adv_guided = 0
    for tc in coverage_guided_tc:
        correct = gt_sul.query(tc)
        trained = sul.query(tc)
        if correct != trained:
            num_adv_guided += 1

    print(f'Random sampling: {round((num_adv_random / num_tc) * 100, 2)}%')
    print(f'Guided sampling: {round((num_adv_guided / num_tc) * 100, 2)}%')
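The two counting loops above compute the same statistic; a hypothetical helper that could replace them (names mirror the example):

def adversarial_rate(test_cases, gt_sul, trained_sul):
    # fraction of test cases on which the trained RNN disagrees with the ground-truth DFA
    return sum(gt_sul.query(tc) != trained_sul.query(tc) for tc in test_cases) / len(test_cases)

# e.g. adversarial_rate(random_tc, gt_sul, sul) == num_adv_random / num_tc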
Example #13
            else:
                mdp_state.transitions[i].append(
                    (moore_mdp_state_map[reached_state], correct_output, 1.))

    mdp = StochasticMealyMachine(initial_mdp_state,
                                 list(moore_mdp_state_map.values()))
    return mdp


mdp = to_smm()
# mdp.visualize()
# exit()
# mdp.save(file_path='CC2640R2-no-feature-req-stochastic')
# exit()
# mdp.make_input_complete('self_loop')
# mdp_sul = StochasticMealySUL(mdp)
mdp_sul = MdpSUL(mdp.to_mdp())
eq_oracle = RandomWordEqOracle(alphabet,
                               mdp_sul,
                               num_walks=10000,
                               min_walk_len=10,
                               max_walk_len=100)

stochastic_model = run_stochastic_Lstar(alphabet,
                                        mdp_sul,
                                        eq_oracle,
                                        automaton_type='mdp')

# mdp = mdp.to_mdp()
# mdp.save('CYW43455_stochastic')