Code Example #1
File: StochasticLStar.py  Project: DES-Lab/AALpy
def run_stochastic_Lstar(input_alphabet, sul: SUL, eq_oracle: Oracle, target_unambiguity=0.99,
                         min_rounds=10, max_rounds=200, automaton_type='mdp', strategy='normal',
                         cex_processing=None, samples_cex_strategy=None, stopping_range_dict='strict', custom_oracle=False,
                         return_data=False, property_based_stopping=None, n_c=20, n_resample=100, print_level=2):
    """
    Learning of Markov Decision Processes and Stochastic Mealy machines based on 'L*-Based Learning of Markov Decision
    Processes' and 'Active Model Learning of Stochastic Reactive Systems' by Tappler et al.

    Args:

        input_alphabet: input alphabet

        sul: system under learning

        eq_oracle: equivalence oracle

        target_unambiguity: target unambiguity value (Default value = 0.99)

        min_rounds: minimum number of learning rounds (Default value = 10)

        max_rounds: if learning_rounds >= max_rounds, learning will stop (Default value = 200)

        automaton_type: either 'mdp' or 'smm' (Default value = 'mdp')

        strategy: either one of ['classic', 'normal', 'chi2'] or an object implementing the DifferenceChecker
            class, default value is 'normal'. 'classic' is the strategy presented in the original paper,
            'normal' is the updated version, and 'chi2' is based on the chi-squared test.

        cex_processing: counterexample processing strategy: None, 'longest_prefix' or 'rs' ('rs' is experimental)

        samples_cex_strategy: strategy for finding counterexamples in the trace tree. None, 'bfs' or
            "random:<#traces to check:int>:<stop probability for single trace in [0,1)>" eg. random:200:0.2

        stopping_range_dict: either a dictionary of custom values, or 'strict'/'relaxed' to use predefined
            stopping criteria. In a custom dictionary, each key n maps to the range within which the last n
            unambiguity values must lie for early stopping to trigger. E.g. {5: 0.001, 10: 0.01} stops if
            max(last_5_vals) - min(last_5_vals) <= 0.001, or the analogous condition holds for the last 10 values.

        property_based_stopping: a tuple containing (path to the properties file, correct values of each property,
            allowed error for each property). A recommended error is 0.02 (2%).

        custom_oracle: if True, the warning about the oracle type is suppressed and a custom oracle can be used

        return_data: if True, a map containing all information (number of queries, runtimes, etc.) will be
            returned (Default value = False)

        n_c: cutoff for a cell to be considered complete (Default value = 20), only used with 'classic' strategy

        n_resample: resampling size (Default value = 100), only used with 'classic' strategy

        print_level: 0 - None, 1 - just results, 2 - current round and hypothesis size, 3 - educational/debug
            (Default value = 2)


    Returns:

      learned MDP/SMM
    """

    assert samples_cex_strategy in cex_sampling_options or samples_cex_strategy.startswith('random')
    assert cex_processing in cex_processing_options
    assert automaton_type in {'mdp', 'smm'}
    if not isinstance(stopping_range_dict, dict):
        assert stopping_range_dict in {'strict', 'relaxed'}
    if property_based_stopping:
        assert len(property_based_stopping) == 3

    if strategy in diff_checker_options:
        compatibility_checker = diff_checker_options[strategy]
    else:
        assert isinstance(strategy, DifferenceChecker)
        compatibility_checker = strategy

    if not custom_oracle and type(eq_oracle) not in available_oracles:
        raise SystemExit(available_oracles_error_msg)

    if stopping_range_dict == 'strict':
        stopping_range_dict = {12: 0.001, 18: 0.002, 25: 0.005, 30: 0.01, 35: 0.02}
    elif stopping_range_dict == 'relaxed':
        stopping_range_dict = {7: 0.001, 12: 0.003, 17: 0.005, 22: 0.01, 28: 0.02}

    stochastic_teacher = StochasticTeacher(sul, n_c, eq_oracle, automaton_type, compatibility_checker,
                                           samples_cex_strategy=samples_cex_strategy)

    # This way all steps from eq. oracle will be added to the tree
    eq_oracle.sul = stochastic_teacher.sul

    observation_table = SamplingBasedObservationTable(input_alphabet, automaton_type,
                                                      stochastic_teacher, compatibility_checker=compatibility_checker,
                                                      strategy=strategy,
                                                      cex_processing=cex_processing)

    start_time = time.time()
    eq_query_time = 0

    # Ask queries for non-completed cells and update the observation table
    observation_table.refine_not_completed_cells(n_resample, uniform=True)
    observation_table.update_obs_table_with_freq_obs()

    learning_rounds = 0

    while True:
        learning_rounds += 1

        observation_table.make_closed_and_consistent()

        hypothesis = observation_table.generate_hypothesis()

        observation_table.trim(hypothesis)

        # If the chaos state is not reachable, remove it from the state set
        chaos_cex_present = observation_table.chaos_counterexample(hypothesis)

        if not chaos_cex_present:
            if automaton_type == 'mdp':
                hypothesis.states.remove(next(state for state in hypothesis.states if state.output == 'chaos'))
            else:
                hypothesis.states.remove(next(state for state in hypothesis.states if state.state_id == 'chaos'))

        if print_level > 1:
            print(f'Hypothesis: {learning_rounds}: {len(hypothesis.states)} states.')

        if print_level == 3:
            print_observation_table(observation_table, 'stochastic')

        cex = None

        if not chaos_cex_present:
            eq_query_start = time.time()
            cex = stochastic_teacher.equivalence_query(hypothesis)
            eq_query_time += time.time() - eq_query_start

        if cex:
            if print_level == 3:
                print('Counterexample', cex)
            # get all prefixes and add them to the S set
            if cex_processing is None:
                for pre in get_cex_prefixes(cex, automaton_type):
                    if pre not in observation_table.S:
                        observation_table.S.append(pre)
            else:
                suffixes = None
                if cex_processing == 'longest_prefix':
                    prefixes = observation_table.S + list(observation_table.get_extended_s())
                    suffixes = stochastic_longest_prefix(cex, prefixes)
                elif cex_processing == 'rs':
                    suffixes = stochastic_rs(sul, cex, hypothesis)
                for suf in suffixes:
                    if suf not in observation_table.E:
                        observation_table.E.append(suf)
                        break

        # Ask queries for non-completed cells and update the observation table
        refined = observation_table.refine_not_completed_cells(n_resample)
        observation_table.update_obs_table_with_freq_obs()

        if property_based_stopping and learning_rounds >= min_rounds:
            # stop based on maximum allowed error
            if stop_based_on_confidence(hypothesis, property_based_stopping, print_level):
                break
        else:
            # stop based on number of unambiguous rows
            stop_based_on_unambiguity = observation_table.stop(learning_rounds, chaos_cex_present, cex,
                                                               stopping_range_dict,
                                                               target_unambiguity=target_unambiguity,
                                                               min_rounds=min_rounds, max_rounds=max_rounds,
                                                               print_unambiguity=print_level > 1)
            if stop_based_on_unambiguity:
                break

        if not refined:
            break

    total_time = round(time.time() - start_time, 2)
    eq_query_time = round(eq_query_time, 2)
    learning_time = round(total_time - eq_query_time, 2)

    info = {
        'learning_rounds': learning_rounds,
        'automaton_size': len(hypothesis.states),
        'queries_learning': stochastic_teacher.sul.num_queries - eq_oracle.num_queries,
        'steps_learning': stochastic_teacher.sul.num_steps - eq_oracle.num_steps,
        'queries_eq_oracle': eq_oracle.num_queries,
        'steps_eq_oracle': eq_oracle.num_steps,
        'learning_time': learning_time,
        'eq_oracle_time': eq_query_time,
        'total_time': total_time
    }

    if print_level > 0:
        print_learning_info(info)

    if return_data:
        return hypothesis, info

    return hypothesis
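
A minimal usage sketch for Example #1's run_stochastic_Lstar. It assumes AALpy's public helpers (MdpSUL, RandomWalkEqOracle, generate_random_mdp); their exact signatures vary between AALpy versions, so treat the snippet as illustrative rather than authoritative.

# Usage sketch; helper names and signatures are assumptions about AALpy's public API.
from aalpy.SULs import MdpSUL
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import generate_random_mdp

# A small random MDP stands in for the (normally unknown) system under learning.
mdp = generate_random_mdp(num_states=5, input_size=2, output_size=3)
input_alphabet = mdp.get_input_alphabet()

sul = MdpSUL(mdp)
eq_oracle = RandomWalkEqOracle(input_alphabet, sul, num_steps=2000, reset_prob=0.25)

learned_mdp, info = run_stochastic_Lstar(input_alphabet, sul, eq_oracle,
                                         automaton_type='mdp', min_rounds=10,
                                         max_rounds=200, return_data=True, print_level=1)
print(info['learning_rounds'], info['total_time'])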
Code Example #2
File: StochasticLStar.py  Project: Hzaatiti/AALpy
def run_stochastic_Lstar(input_alphabet, sul: SUL, eq_oracle: Oracle, n_c=20, n_resample=100, min_rounds=10,
                         max_rounds=200, automaton_type='mdp', strategy='normal', return_data=False, print_level=2):
    """
    Learning of Markov Decision Processes based on 'L*-Based Learning of Markov Decision Processes' by Tappler et al.

    Args:

        input_alphabet: input alphabet

        sul: system under learning

        eq_oracle: equivalence oracle

        n_c: cutoff for a cell to be considered complete (Default value = 20)

        n_resample: resampling size (Default value = 100)

        min_rounds: minimum number of learning rounds (Default value = 10)

        max_rounds: if learning_rounds >= max_rounds, learning will stop (Default value = 200)

        automaton_type: either 'mdp' or 'smm' (Default value = 'mdp')

        strategy: if 'no_cq', the improved version of the algorithm will be used (Default value = 'normal')

        return_data: if True, a map containing all information (number of queries, runtimes, etc.) will be
            returned (Default value = False)

        print_level: 0 - None, 1 - just results, 2 - current round and hypothesis size, 3 - educational/debug
            (Default value = 2)


    Returns:
      learned MDP/SMM

    """

    assert strategy in strategies
    # Initialize teacher and observation table
    stochastic_teacher = StochasticTeacher(sul, n_c, eq_oracle, automaton_type)
    observation_table = SamplingBasedObservationTable(input_alphabet, automaton_type,
                                                      stochastic_teacher, strategy=strategy)

    start_time = time.time()
    eq_query_time = 0

    # Ask queries for non-completed cells and update the observation table
    observation_table.refine_not_completed_cells(n_resample, uniform=True)
    observation_table.update_obs_table_with_freq_obs()

    learning_rounds = 0
    while True:
        learning_rounds += 1

        observation_table.make_closed_and_consistent()

        hypothesis = observation_table.generate_hypothesis()

        observation_table.trim(hypothesis)

        # If the chaos state is not reachable, remove it from the state set
        cex = observation_table.chaos_counterexample(hypothesis)
        chaos_cex_present = bool(cex)

        if not cex:
            if automaton_type == 'mdp':
                hypothesis.states.remove(next(state for state in hypothesis.states if state.output == 'chaos'))
            else:
                hypothesis.states.remove(next(state for state in hypothesis.states if state.state_id == 'chaos'))

        if print_level > 1:
            print(f'Hypothesis: {learning_rounds}: {len(hypothesis.states)} states.')

        if print_level == 3:
            print_observation_table(observation_table.S, observation_table.get_extended_s(), observation_table.E,
                                    observation_table.T, False)

        # If there is a prefix leading to the chaos state, use it as a counterexample;
        # otherwise perform an equivalence query
        eq_query_start = time.time()
        cex = stochastic_teacher.equivalence_query(hypothesis) if not cex else cex
        eq_query_time += time.time() - eq_query_start

        if cex:
            if print_level == 3:
                print('Counterexample', cex)
            # get all prefixes and add them to the S set
            for p in get_cex_prefixes(cex, automaton_type):
                if p not in observation_table.S:
                    observation_table.S.append(p)

        # Ask queries for non-completed cells and update the observation table
        observation_table.refine_not_completed_cells(n_resample)
        observation_table.update_obs_table_with_freq_obs()

        if observation_table.stop(learning_rounds, chaos_present=chaos_cex_present, min_rounds=min_rounds,
                                  max_rounds=max_rounds, print_unambiguity=print_level > 1):
            break

    total_time = round(time.time() - start_time, 2)
    eq_query_time = round(eq_query_time, 2)
    learning_time = round(total_time - eq_query_time, 2)

    info = {
        'learning_rounds': learning_rounds,
        'automaton_size': len(hypothesis.states),
        'queries_learning': sul.num_queries,
        'steps_learning': sul.num_steps,
        'queries_eq_oracle': eq_oracle.num_queries,
        'steps_eq_oracle': eq_oracle.num_steps,
        'learning_time': learning_time,
        'eq_oracle_time': eq_query_time,
        'total_time': total_time
    }

    if print_level > 0:
        print_learning_info(info)

    if return_data:
        return hypothesis, info

    return hypothesis
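
Example #1's stopping_range_dict criterion is easy to state in isolation. The sketch below is illustrative only, not AALpy's internal implementation of observation_table.stop(): it shows the documented rule that learning stops early once the last n unambiguity values all lie within the allowed range.

# Illustrative sketch of the stopping_range_dict rule (not AALpy's internal code).
def should_stop_early(unambiguity_history, stopping_range_dict):
    for n, allowed_range in stopping_range_dict.items():
        if len(unambiguity_history) >= n:
            last_n = unambiguity_history[-n:]
            # Stop if the last n values lie within allowed_range of each other.
            if max(last_n) - min(last_n) <= allowed_range:
                return True
    return False

# With the 'strict' preset, twelve near-identical values trigger a stop.
history = [0.80, 0.90] + [0.95] * 12
print(should_stop_early(history, {12: 0.001, 18: 0.002, 25: 0.005, 30: 0.01, 35: 0.02}))  # True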
Code Example #3
File: LStar.py  Project: haubitzer/AALpy
def run_Lstar(alphabet: list,
              sul: SUL,
              eq_oracle: Oracle,
              automaton_type,
              closing_strategy='longest_first',
              cex_processing='rs',
              suffix_closedness=True,
              closedness_type='suffix',
              max_learning_rounds=None,
              cache_and_non_det_check=True,
              return_data=False,
              print_level=2):
    """Executes L* algorithm with Riverst-Schapire counter example processing.

    Args:

        alphabet: input alphabet

        sul: system under learning

        eq_oracle: equivalence oracle

        automaton_type: type of automaton to be learned. Either 'dfa', 'mealy' or 'moore'.

        closing_strategy: closing strategy used in the close method. Either 'longest_first', 'shortest_first' or
            'single' (Default value = 'longest_first')

        cex_processing: Counterexample processing strategy. Either None, 'rs' (Rivest-Schapire) or 'longest_prefix'.
            (Default value = 'rs')

        suffix_closedness: if True, the E set will be kept suffix closed, meaning that all suffixes of the
            processed counterexample will be added; if False, just a single suffix will be added
            (Default value = True)

        closedness_type: either 'suffix' or 'prefix'. If 'suffix', the E set is kept suffix closed; if 'prefix',
            all prefixes of the chosen suffix will be added instead. (Default value = 'suffix')

        max_learning_rounds: number of learning rounds after which learning will terminate (Default value = None)

        cache_and_non_det_check: Use caching and non-determinism checks (Default value = True)

        return_data: if True, a map containing all information (runtime/#queries/#steps) will be returned
            (Default value = False)

        print_level: 0 - None, 1 - just results, 2 - current round and hypothesis size, 3 - educational/debug
            (Default value = 2)

    Returns:

        automaton of type automaton_type (dict containing all information about learning if 'return_data' is True)

    """
    assert cex_processing in counterexample_processing_strategy
    assert closedness_type in closedness_options
    assert print_level in print_options

    if cache_and_non_det_check:
        # Wrap the sul in the CacheSUL, so that all steps/queries are cached
        sul = CacheSUL(sul)
        eq_oracle.sul = sul

    start_time = time.time()
    eq_query_time = 0
    learning_rounds = 0
    hypothesis = None

    observation_table = ObservationTable(alphabet, sul, automaton_type)

    # Initial update of observation table, for empty row
    observation_table.update_obs_table()
    while True:
        learning_rounds += 1
        if max_learning_rounds and learning_rounds - 1 == max_learning_rounds:
            break

        # Make observation table consistent (iff there is no counterexample processing)
        if not cex_processing:
            inconsistent_rows = observation_table.get_causes_of_inconsistency()
            while inconsistent_rows is not None:
                extend_set(observation_table.E, inconsistent_rows)
                observation_table.update_obs_table(e_set=inconsistent_rows)
                inconsistent_rows = observation_table.get_causes_of_inconsistency()

        # Close observation table
        rows_to_close = observation_table.get_rows_to_close(closing_strategy)
        while rows_to_close is not None:
            rows_to_query = []
            for row in rows_to_close:
                observation_table.S.append(row)
                rows_to_query.extend([row + (a, ) for a in alphabet])
            observation_table.update_obs_table(s_set=rows_to_query)
            rows_to_close = observation_table.get_rows_to_close(closing_strategy)

        # Generate hypothesis
        hypothesis = observation_table.gen_hypothesis(
            check_for_duplicate_rows=cex_processing is None)

        if print_level > 1:
            print(f'Hypothesis {learning_rounds}: {len(hypothesis.states)} states.')

        if print_level == 3:
            print_observation_table(observation_table, 'det')

        # Find counterexample
        eq_query_start = time.time()
        cex = eq_oracle.find_cex(hypothesis)
        eq_query_time += time.time() - eq_query_start

        # If no counterexample is found, return the hypothesis
        if cex is None:
            break

        if print_level == 3:
            print('Counterexample', cex)

        # Process counterexample and ask membership queries
        if not cex_processing:
            s_to_update = []
            added_rows = extend_set(observation_table.S, all_prefixes(cex))
            s_to_update.extend(added_rows)
            for p in added_rows:
                s_to_update.extend([p + (a, ) for a in alphabet])

            observation_table.update_obs_table(s_set=s_to_update)
            continue
        elif cex_processing == 'longest_prefix':
            cex_suffixes = longest_prefix_cex_processing(
                observation_table.S + list(observation_table.s_dot_a()), cex,
                closedness_type)
        else:
            cex_suffixes = rs_cex_processing(sul, cex, hypothesis,
                                             suffix_closedness,
                                             closedness_type)

        added_suffixes = extend_set(observation_table.E, cex_suffixes)
        observation_table.update_obs_table(e_set=added_suffixes)

    total_time = round(time.time() - start_time, 2)
    eq_query_time = round(eq_query_time, 2)
    learning_time = round(total_time - eq_query_time, 2)

    info = {
        'learning_rounds': learning_rounds,
        'automaton_size': len(hypothesis.states),
        'queries_learning': sul.num_queries,
        'steps_learning': sul.num_steps,
        'queries_eq_oracle': eq_oracle.num_queries,
        'steps_eq_oracle': eq_oracle.num_steps,
        'learning_time': learning_time,
        'eq_oracle_time': eq_query_time,
        'total_time': total_time,
        'characterization set': observation_table.E
    }
    if cache_and_non_det_check:
        info['cache_saved'] = sul.num_cached_queries

    if print_level > 0:
        print_learning_info(info)

    if return_data:
        return hypothesis, info

    return hypothesis
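
A usage sketch for run_Lstar, modeled on AALpy's README example. DfaSUL, RandomWalkEqOracle and get_Angluin_dfa are assumed from AALpy's public API and may differ across versions.

from aalpy.SULs import DfaSUL
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import get_Angluin_dfa

dfa = get_Angluin_dfa()  # small benchmark DFA shipped with AALpy
alphabet = dfa.get_input_alphabet()

sul = DfaSUL(dfa)
eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=5000, reset_prob=0.09)

learned_dfa = run_Lstar(alphabet, sul, eq_oracle, automaton_type='dfa',
                        cex_processing='rs', cache_and_non_det_check=True)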
Code Example #4
File: AbstractedOnfsmLstar.py  Project: DES-Lab/AALpy
def run_abstracted_ONFSM_Lstar(alphabet: list,
                               sul: SUL,
                               eq_oracle: Oracle,
                               abstraction_mapping: dict,
                               n_sampling=100,
                               max_learning_rounds=None,
                               return_data=False,
                               print_level=2):
    """
    Based on 'Learning Abstracted Non-deterministic Finite State Machines' by Pferscher and Aichernig.
    The algorithm learns an abstracted ONFSM of a non-deterministic system. For the additional abstraction,
    equivalence classes for outputs are used.
    Learning an ONFSM relies on the all-weather assumption (all outputs will eventually be observed). If this
    assumption is not satisfied by sampling, learning might not converge to the minimal model and runtime could
    increase substantially. Note that this is an inherent flaw of the all-weather assumption.
    AALpy v2.0 will try to solve this problem with a novel approach.

    Args:

        alphabet: input alphabet

        sul: system under learning

        eq_oracle: equivalence oracle

        abstraction_mapping: dictionary containing mappings from abstracted to concrete values (equivalence classes)

        n_sampling: number of times that membership/input queries will be asked for each cell in the observation
            (Default value = 100)

        max_learning_rounds: if max_learning_rounds is reached, learning will stop (Default value = None)

        return_data: if True, a map containing all information (number of queries, runtimes, etc.) will be
            returned (Default value = False)

        print_level: 0 - None, 1 - just results, 2 - current round and hypothesis size, 3 - educational/debug
            (Default value = 2)

    Returns:
        learned abstracted ONFSM

    """
    start_time = time.time()
    eq_query_time = 0
    learning_rounds = 0
    hypothesis = None

    sul = SULWrapper(sul)
    eq_oracle.sul = sul

    abstracted_observation_table = AbstractedNonDetObservationTable(
        alphabet, sul, abstraction_mapping, n_sampling)

    # We first query the initial row. Then, based on the outputs in its cells, we generate new rows in S.A,
    # and then we perform membership/input queries for them.
    abstracted_observation_table.update_obs_table()
    new_rows = abstracted_observation_table.update_extended_S()
    abstracted_observation_table.update_obs_table(s_set=new_rows)

    while True:
        learning_rounds += 1
        if max_learning_rounds and learning_rounds - 1 == max_learning_rounds:
            break

        closed_complete_consistent = False
        while not closed_complete_consistent:
            closed_complete_consistent = True

            row_to_close = abstracted_observation_table.get_row_to_close()
            while row_to_close is not None:
                # First we add new rows to S.A, based on the values in the cells of the row that is to be
                # closed. Once those rows are created, they are populated and closedness is checked once again.
                closed_complete_consistent = False
                extended_rows = abstracted_observation_table.update_extended_S(row_to_close)
                abstracted_observation_table.update_obs_table(s_set=extended_rows)
                row_to_close = abstracted_observation_table.get_row_to_close()

            row_to_complete = abstracted_observation_table.get_row_to_complete()
            while row_to_complete is not None:
                closed_complete_consistent = False
                abstracted_observation_table.extend_S_dot_A([row_to_complete])
                abstracted_observation_table.update_obs_table(s_set=[row_to_complete])
                row_to_complete = abstracted_observation_table.get_row_to_complete()

            e_column_for_consistency = abstracted_observation_table.get_row_to_make_consistent()
            while e_column_for_consistency is not None:
                closed_complete_consistent = False
                extended_col = abstracted_observation_table.update_E(e_column_for_consistency)
                abstracted_observation_table.update_obs_table(e_set=extended_col)
                e_column_for_consistency = abstracted_observation_table.get_row_to_make_consistent()

        abstracted_observation_table.clean_tables()
        hypothesis = abstracted_observation_table.gen_hypothesis()

        if print_level == 3:
            print('Observation Table')
            print_observation_table(abstracted_observation_table.observation_table, 'non-det')
            print()
            print('Abstracted Observation Table')
            print_observation_table(abstracted_observation_table, 'abstracted-non-det')

        if print_level > 1:
            print(f'Hypothesis {learning_rounds} has {len(hypothesis.states)} states.')

        # Find counterexample
        eq_query_start = time.time()
        cex = eq_oracle.find_cex(hypothesis)
        eq_query_time += time.time() - eq_query_start

        if cex is None:
            break

        if print_level >= 2:
            print('Counterexample', cex)

        # Process counterexample -> add cex to S.A or E
        abstracted_observation_table.cex_processing(cex, hypothesis)

    total_time = round(time.time() - start_time, 2)
    eq_query_time = round(eq_query_time, 2)
    learning_time = round(total_time - eq_query_time, 2)

    info = {
        'learning_rounds': learning_rounds,
        'automaton_size': len(hypothesis.states),
        'queries_learning': sul.num_queries,
        'steps_learning': sul.num_steps,
        'queries_eq_oracle': eq_oracle.num_queries,
        'steps_eq_oracle': eq_oracle.num_steps,
        'learning_time': learning_time,
        'eq_oracle_time': eq_query_time,
        'total_time': total_time
    }

    if print_level > 0:
        print_learning_info(info)

    if return_data:
        return hypothesis, info

    return hypothesis
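
The abstraction_mapping argument is a plain dict describing output equivalence classes. A hypothetical sketch (all output names are invented for illustration; the exact shape of the mapping should be checked against the AALpy examples):

# Hypothetical equivalence classes: several concrete outputs collapse into one
# abstract output. All names here are invented for illustration.
abstraction_mapping = {
    'ack_0': 'ACK',        # concrete output -> abstract class
    'ack_1': 'ACK',
    'timeout': 'ERR',
    'crc_error': 'ERR',
}

# With `alphabet`, `sul` (wrapping a non-deterministic system) and `eq_oracle`
# prepared as in the other examples:
# learned = run_abstracted_ONFSM_Lstar(alphabet, sul, eq_oracle,
#                                      abstraction_mapping, n_sampling=100)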
Code Example #5
def run_Lstar_ONFSM(alphabet: list,
                    sul: SUL,
                    eq_oracle: Oracle,
                    n_sampling=50,
                    max_learning_rounds=None,
                    return_data=False,
                    print_level=2):
    """
    Based on 'Learning Finite State Models of Observable Nondeterministic Systems in a Testing Context' by Fakih
    et al. Learning an ONFSM relies on the all-weather assumption (by sampling we will obtain all possible
    non-deterministic outputs). If this assumption is not satisfied by sampling, learning might not converge to
    the minimal model and runtime could increase substantially. Note that this is an inherent flaw of the
    all-weather assumption.
    AALpy v2.0 will try to solve this problem with a novel approach.

    Args:

        alphabet: input alphabet

        sul: system under learning

        eq_oracle: equivalence oracle

        n_sampling: number of times that each cell has to be updated. If this number is too low, the all-weather
            condition will not hold and learning will not converge to the correct model. (Default value = 50)

        max_learning_rounds: if max_learning_rounds is reached, learning will stop (Default value = None)

        return_data: if True, a map containing all information (number of queries, runtimes, etc.) will be
            returned (Default value = False)

        print_level: 0 - None, 1 - just results, 2 - current round and hypothesis size, 3 - educational/debug
            (Default value = 2)

    Returns:
        learned ONFSM

    """
    # Print warning
    print(
        'Starting learning with an all-weather assumption.\n'
        'See run_Lstar_ONFSM documentation for more details about possible non-convergence.'
    )

    start_time = time.time()
    eq_query_time = 0
    learning_rounds = 0
    hypothesis = None

    sul = SULWrapper(sul)
    eq_oracle.sul = sul

    observation_table = NonDetObservationTable(alphabet, sul, n_sampling)

    # We first query the initial row. Then, based on the outputs in its cells, we generate new rows in the
    # extended S set, and then we perform membership/input queries for them.
    observation_table.update_obs_table()
    new_rows = observation_table.update_extended_S(observation_table.S[0])
    observation_table.update_obs_table(s_set=new_rows)

    while True:
        learning_rounds += 1
        if max_learning_rounds and learning_rounds - 1 == max_learning_rounds:
            break

        # Close observation table
        row_to_close = observation_table.get_row_to_close()
        while row_to_close is not None:
            # First we add new rows to the extended S set, based on the values in the cells of the row that is
            # to be closed. Once those rows are created, they are populated and closedness is checked once again.
            extended_rows = observation_table.update_extended_S(row_to_close)
            observation_table.update_obs_table(s_set=extended_rows)
            row_to_close = observation_table.get_row_to_close()

        # Generate hypothesis
        hypothesis = observation_table.gen_hypothesis()

        if print_level > 1:
            print(f'Hypothesis {learning_rounds}: {len(hypothesis.states)} states.')

        if print_level == 3:
            print_observation_table(observation_table, 'non-det')

        # Find counterexample
        eq_query_start = time.time()
        cex = eq_oracle.find_cex(hypothesis)
        eq_query_time += time.time() - eq_query_start

        # If no counterexample is found, return the hypothesis
        if cex is None:
            break

        if print_level == 3:
            print('Counterexample', cex)

        # Process counterexample -> Extract suffix to be added to E set
        cex_suffixes = observation_table.cex_processing(cex)
        # Add all suffixes to the E set and ask membership/input queries.
        added_suffixes = extend_set(observation_table.E, cex_suffixes)
        observation_table.update_obs_table(e_set=added_suffixes)

    total_time = round(time.time() - start_time, 2)
    eq_query_time = round(eq_query_time, 2)
    learning_time = round(total_time - eq_query_time, 2)

    info = {
        'learning_rounds': learning_rounds,
        'automaton_size': len(hypothesis.states),
        'queries_learning': sul.num_queries,
        'steps_learning': sul.num_steps,
        'queries_eq_oracle': eq_oracle.num_queries,
        'steps_eq_oracle': eq_oracle.num_steps,
        'learning_time': learning_time,
        'eq_oracle_time': eq_query_time,
        'total_time': total_time
    }

    if print_level > 0:
        print_learning_info(info)

    if return_data:
        return hypothesis, info

    return hypothesis
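
A usage sketch for run_Lstar_ONFSM. The model path is a placeholder, and OnfsmSUL / load_automaton_from_file are assumptions about AALpy's public API.

from aalpy.SULs import OnfsmSUL
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import load_automaton_from_file

# 'path/to/model.dot' is a placeholder for a stored ONFSM model.
onfsm = load_automaton_from_file('path/to/model.dot', automaton_type='onfsm')
alphabet = onfsm.get_input_alphabet()

sul = OnfsmSUL(onfsm)
eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=500, reset_prob=0.15)

# A larger n_sampling makes the all-weather assumption more likely to hold.
learned_onfsm = run_Lstar_ONFSM(alphabet, sul, eq_oracle, n_sampling=50)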
Code Example #6
def run_stochastic_Lstar(input_alphabet,
                         sul: SUL,
                         eq_oracle: Oracle,
                         n_c=20,
                         n_resample=100,
                         min_rounds=10,
                         max_rounds=200,
                         automaton_type='mdp',
                         strategy='normal',
                         cex_processing=None,
                         samples_cex_strategy=None,
                         return_data=False,
                         print_level=2):
    """
    Learning of Markov Decision Processes based on 'L*-Based Learning of Markov Decision Processes' by Tappler et al.

    Args:

        input_alphabet: input alphabet

        sul: system under learning

        eq_oracle: equivalence oracle

        n_c: cutoff for a cell to be considered complete (Default value = 20)

        n_resample: resampling size (Default value = 100)

        min_rounds: minimum number of learning rounds (Default value = 10)

        max_rounds: if learning_rounds >= max_rounds, learning will stop (Default value = 200)

        automaton_type: either 'mdp' or 'smm' (Default value = 'mdp')

        strategy: if 'no_cq', the improved version of the algorithm will be used (Default value = 'normal')

        cex_processing: cex processing strategy, None , 'longest_prefix' or 'rs' (rs is experimental)

        samples_cex_strategy: strategy for finding counterexamples in the trace tree. None, 'bfs' or
            "random:<#traces to check:int>:<stop probability for single trace in [0,1)>" eg. random:200:0.2

        return_data: if True, a map containing all information (number of queries, runtimes, etc.) will be
            returned (Default value = False)

        print_level: 0 - None, 1 - just results, 2 - current round and hypothesis size, 3 - educational/debug
            (Default value = 2)


    Returns:

      learned MDP/SMM
    """

    assert strategy in strategies
    assert samples_cex_strategy in cex_sampling_options or samples_cex_strategy.startswith('random')

    compatibility_checker = AdvancedHoeffdingChecker() if strategy != "normal" else HoeffdingChecker()

    stochastic_teacher = StochasticTeacher(
        sul,
        n_c,
        eq_oracle,
        automaton_type,
        compatibility_checker,
        samples_cex_strategy=samples_cex_strategy)

    # This way all steps from eq. oracle will be added to the tree
    eq_oracle.sul = stochastic_teacher.sul

    observation_table = SamplingBasedObservationTable(
        input_alphabet,
        automaton_type,
        stochastic_teacher,
        compatibility_checker=compatibility_checker,
        strategy=strategy,
        cex_processing=cex_processing)

    start_time = time.time()
    eq_query_time = 0

    # Ask queries for non-completed cells and update the observation table
    observation_table.refine_not_completed_cells(n_resample, uniform=True)
    observation_table.update_obs_table_with_freq_obs()

    learning_rounds = 0
    while True:
        learning_rounds += 1

        observation_table.make_closed_and_consistent()

        hypothesis = observation_table.generate_hypothesis()

        observation_table.trim(hypothesis)

        # If the chaos state is not reachable, remove it from the state set
        chaos_cex_present = observation_table.chaos_counterexample(hypothesis)

        if not chaos_cex_present:
            if automaton_type == 'mdp':
                hypothesis.states.remove(
                    next(state for state in hypothesis.states
                         if state.output == 'chaos'))
            else:
                hypothesis.states.remove(
                    next(state for state in hypothesis.states
                         if state.state_id == 'chaos'))

        if print_level > 1:
            print(f'Hypothesis: {learning_rounds}: {len(hypothesis.states)} states.')

        if print_level == 3:
            print_observation_table(observation_table, 'stoc')

        # If there is a prefix leading to the chaos state, use it as a counterexample;
        # otherwise perform an equivalence query
        cex = None
        if not chaos_cex_present:
            eq_query_start = time.time()
            cex = stochastic_teacher.equivalence_query(hypothesis)
            eq_query_time += time.time() - eq_query_start

        if cex:
            if print_level == 3:
                print('Counterexample', cex)
            # get all prefixes and add them to the S set
            if cex_processing is None:
                for pre in get_cex_prefixes(cex, automaton_type):
                    if pre not in observation_table.S:
                        observation_table.S.append(pre)
            else:
                suffixes = None
                if cex_processing == 'longest_prefix':
                    prefixes = observation_table.S + list(
                        observation_table.get_extended_s())
                    suffixes = stochastic_longest_prefix(cex, prefixes)
                elif cex_processing == 'rs':
                    suffixes = stochastic_rs(sul, cex, hypothesis)
                for suf in suffixes:
                    if suf not in observation_table.E:
                        observation_table.E.append(suf)

        # Ask queries for non-completed cells and update the observation table
        refined = observation_table.refine_not_completed_cells(n_resample)
        observation_table.update_obs_table_with_freq_obs()

        if observation_table.stop(learning_rounds,
                                  chaos_present=chaos_cex_present,
                                  min_rounds=min_rounds,
                                  max_rounds=max_rounds,
                                  print_unambiguity=print_level > 1):
            break

        if not refined:
            # All cells are complete but stopping did not trigger: increase n_c.
            # We could also break here.
            stochastic_teacher.n_c *= 1.5
            stochastic_teacher.complete_query_cache.clear()

    total_time = round(time.time() - start_time, 2)
    eq_query_time = round(eq_query_time, 2)
    learning_time = round(total_time - eq_query_time, 2)

    info = {
        'learning_rounds': learning_rounds,
        'automaton_size': len(hypothesis.states),
        'queries_learning': stochastic_teacher.sul.num_queries - eq_oracle.num_queries,
        'steps_learning': stochastic_teacher.sul.num_steps - eq_oracle.num_steps,
        'queries_eq_oracle': eq_oracle.num_queries,
        'steps_eq_oracle': eq_oracle.num_steps,
        'learning_time': learning_time,
        'eq_oracle_time': eq_query_time,
        'total_time': total_time
    }

    if print_level > 0:
        print_learning_info(info)

    if return_data:
        return hypothesis, info

    return hypothesis
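
The same function also covers the stochastic Mealy machine case. A sketch mirroring the setup after Example #1 (same assumptions about AALpy's helpers), but with automaton_type='smm', longest-prefix counterexample processing and BFS counterexample sampling:

from aalpy.SULs import MdpSUL
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import generate_random_mdp

mdp = generate_random_mdp(num_states=5, input_size=2, output_size=3)
input_alphabet = mdp.get_input_alphabet()
sul = MdpSUL(mdp)
eq_oracle = RandomWalkEqOracle(input_alphabet, sul, num_steps=2000, reset_prob=0.25)

learned_smm = run_stochastic_Lstar(input_alphabet, sul, eq_oracle,
                                   automaton_type='smm',
                                   cex_processing='longest_prefix',
                                   samples_cex_strategy='bfs')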