Example #1
    def prove_rnn_max_property(self, img_patch, rnn_out_idx, max_value, n):
        '''
        Prove a property on the RNN.
        :param img_patch: input image patch for the network (a default patch is used if None)
        :param rnn_out_idx: index of the RNN output to constrain
        :param max_value: max value for that output
        :param n: number of iterations
        :return: True / False
        '''
        if img_patch is None:
            img_patch = np.array([0.1, 0.2, 0.3, 0.4] * 28)  # 112 entries
            # img_patch = np.array([0.2] * 112)
            # img_patch = np.load('1.pt')
        img_patch = img_patch[:MAX_SIZE]

        self.set_network_description(img_patch, n)

        property_eq = MarabouCore.Equation(MarabouCore.Equation.GE)
        property_eq.addAddend(1, self.rnn_output_idxs[rnn_out_idx])
        property_eq.setScalar(max_value)
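        # the RNN cell's start variable sits 3 Marabou indices below its output variable (add_rnn_cells convention)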
        rnn_start_idxs = [i - 3 for i in self.rnn_output_idxs]

        algorithm = IterateAlphasSGD(self.rnn_initial_values, rnn_start_idxs,
                                     self.rnn_output_idxs)
        return prove_multidim_property(self.network, rnn_start_idxs,
                                       self.rnn_output_idxs, [property_eq],
                                       algorithm)
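A minimal call-site sketch for the method above. The class name RnnMnistVerifier and its constructor are hypothetical (they do not appear in this snippet); only the prove_rnn_max_property signature is taken from the code.

import numpy as np

verifier = RnnMnistVerifier('mnist_rnn.h5')         # hypothetical class and model path
img_patch = np.array([0.1, 0.2, 0.3, 0.4] * 28)     # same 112-entry patch as the default above
# try to prove the max-value property on RNN output 0 over 4 iterations
result = verifier.prove_rnn_max_property(img_patch, rnn_out_idx=0, max_value=5.0, n=4)
print('proved' if result else 'not proved')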
Example #2
    def prove_adv_property(self, img_patch, out_idx_max, out_idx_compare, n):
        '''
        Prove an adversarial property on the RNN output.
        :param img_patch: input image patch for the network (a default patch is used if None)
        :param out_idx_max: which index in the output should be maximal
        :param out_idx_compare: output index to compare the maximal index against
        :param n: number of iterations
        :return: True / False
        '''
        if img_patch is None:
            # img_patch = np.array([0.1, 0.2, 0.3, 0.4] * 28) # 112
            img_patch = np.array([0.2] * 112)
        img_patch = img_patch[:MAX_SIZE]

        properties = []
        self.set_network_description(img_patch, n)
        assert len(self.out_idx) > out_idx_max

        property_eq = MarabouCore.Equation(MarabouCore.Equation.GE)
        property_eq.addAddend(1, self.out_idx[out_idx_max])
        property_eq.addAddend(-1, out_idx_compare)
        property_eq.setScalar(small)  # 'small' is a constant defined elsewhere in the module (not shown in this snippet)
        properties.append(property_eq)

        rnn_start_idxs = [i - 3 for i in self.rnn_output_idxs]
        return prove_multidim_property(self.network, rnn_start_idxs,
                                       self.rnn_output_idxs,
                                       self.rnn_initial_values, properties)
Example #3
def query(xlim: List[Tuple[float, float]], P: Optional[List[Predicate]], Q: List[Predicate], h5_file_path: str,
          algorithm_ptr, n_iterations=10, steps_num=5000):
    '''
    :param xlim: list of tuples, each tuple is (lower_bound, upper_bound) bounds on one input entry
    :param P: predicates on the input (linear constraints), besides the bounds on the input (xlim)
    :param Q: conditions on the output (linear constraints), not negated
    :param h5_file_path: path to the keras model to check
    :param algorithm_ptr: callable that builds the invariant-search algorithm from (rnn_model, xlim)
    :param n_iterations: number of iterations to run
    :param steps_num: maximum number of steps passed to prove_multidim_property
    :return: True / False, queries_stats, and the algorithm's alpha_history
    '''
    rnn_model = RnnMarabouModel(h5_file_path, n_iterations)
    rnn_model.set_input_bounds(xlim)

    add_predicates(rnn_model, P, n_iterations)

    start_initial_alg = timer()
    algorithm = algorithm_ptr(rnn_model, xlim)
    end_initial_alg = timer()
    Q_negate = []
    for q in Q:
        Q_negate.append(negate_equation(q.get_equation(rnn_model)))

    res, queries_stats = prove_multidim_property(rnn_model, Q_negate, algorithm, debug=1, return_queries_stats=True,
                                                 number_of_steps=steps_num)

    if queries_stats:
        step_times = queries_stats['step_times']['raw']
        step_times.insert(0, end_initial_alg - start_initial_alg)
        queries_stats['step_times'] = {'avg': np.mean(step_times), 'median': np.median(step_times), 'raw': step_times}
        queries_stats['step_queries'] = len(step_times)

    return res, queries_stats, algorithm.alpha_history
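A sketch of a possible call to query. The bounds, model path, output_predicate object and build_algorithm factory below are placeholders; only the query signature and the way Q and algorithm_ptr are used inside it are taken from the code above.

xlim = [(-0.5, 0.5)] * 10                      # one (lower, upper) pair per input entry
P = None                                       # no extra input predicates
Q = [output_predicate]                         # placeholder Predicate exposing get_equation(rnn_model)
res, queries_stats, alpha_history = query(xlim, P, Q, 'model.h5',
                                           algorithm_ptr=build_algorithm,  # any callable taking (rnn_model, xlim)
                                           n_iterations=10, steps_num=5000)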
Example #4
def test_auto_positive_sum_positive_iterateSGD():
    num_iterations = 10
    xlim = (-1, 1)
    ylim = (0, num_iterations + 1.1)

    network, rnn_start_idxs, invariant_equation, property_eqs, initial_values = define_positive_sum_network_no_invariant(
        xlim, ylim, num_iterations)
    rnn_output_idxs = [i + 3 for i in rnn_start_idxs]

    algorithm = IterateAlphasSGD(initial_values, rnn_start_idxs,
                                 rnn_output_idxs)
    assert prove_multidim_property(network, rnn_start_idxs, rnn_output_idxs,
                                   property_eqs, algorithm)
Example #5
def adversarial_query(x: list, radius: float, y_idx_max: int, other_idx: int, h5_file_path: str, algorithm_ptr,
                      n_iterations=10, steps_num=5000):
    '''
    Query Marabou with an adversarial query.
    :param x: base vector (the input vector around which we want to verify a ball)
    :param radius: determines the limit of the inputs around the base vector
    :param y_idx_max: index of the output that should be maximal
    :param other_idx: output index to compare the maximal index against
    :param h5_file_path: path to the keras model to check
    :param algorithm_ptr: callable that builds the invariant-search algorithm from (rnn_model, xlim)
    :param n_iterations: number of iterations to run
    :param steps_num: maximum number of steps passed to prove_multidim_property
    :return: True / False, queries_stats, and the algorithm's alpha_history
    '''

    if y_idx_max is None or other_idx is None:
        y_idx_max, other_idx = get_out_idx(x, n_iterations, h5_file_path)
        if y_idx_max == other_idx or y_idx_max is None or other_idx is None:
            # This means all the entries in the output vector are equal...
            return False, None, None

    xlim = calc_min_max_by_radius(x, radius)
    rnn_model = RnnMarabouModel(h5_file_path, n_iterations)
    rnn_model.set_input_bounds(xlim)

    # output[y_idx_max] >= output[other_idx] <-> output[y_idx_max] - output[other_idx] >= 0; before feeding Marabou we negate this
    adv_eq = MarabouCore.Equation(MarabouCore.Equation.GE)
    adv_eq.addAddend(-1, rnn_model.output_idx[other_idx])
    adv_eq.addAddend(1, rnn_model.output_idx[y_idx_max])
    adv_eq.setScalar(0)

    time_eq = MarabouCore.Equation()
    time_eq.addAddend(1, rnn_model.get_start_end_idxs(0)[0][0])
    time_eq.setScalar(n_iterations)

    start_initial_alg = timer()
    algorithm = algorithm_ptr(rnn_model, xlim)
    end_initial_alg = timer()
    # rnn_model.network.dump()

    res, queries_stats = prove_multidim_property(rnn_model, [negate_equation(adv_eq), time_eq], algorithm, debug=1,
                                                 return_queries_stats=True, number_of_steps=steps_num)
    if queries_stats:
        step_times = queries_stats['step_times']['raw']
        step_times.insert(0, end_initial_alg - start_initial_alg)
        queries_stats['step_times'] = {'avg': np.mean(step_times), 'median': np.median(step_times), 'raw': step_times}
        queries_stats['step_queries'] = len(step_times)

    if queries_stats and 'invariant_queries' in queries_stats and 'property_queries' in queries_stats and \
            queries_stats['property_queries'] != queries_stats['invariant_queries']:
        print("What happened?\n", x)
    return res, queries_stats, algorithm.alpha_history
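A call-site sketch for adversarial_query. The base point, radius, model path and algorithm factory are placeholders; passing None for both output indices makes the function pick them via get_out_idx, as the code above shows.

x = [0.2] * 40                                 # placeholder base input vector
res, queries_stats, alpha_history = adversarial_query(
    x, radius=0.01, y_idx_max=None, other_idx=None,
    h5_file_path='model.h5',
    algorithm_ptr=build_algorithm,             # any callable taking (rnn_model, xlim)
    n_iterations=10)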
Example #6
def auto_positive_sum_positive_SMTbase():
    num_iterations = 10
    xlim = (-1, 1)
    ylim = (0, num_iterations + 1.1)

    network, rnn_start_idxs, invariant_equation, property_eqs, initial_values = define_positive_sum_network_no_invariant(
        xlim, ylim, num_iterations)
    rnn_output_idxs = [i + 3 for i in rnn_start_idxs]

    algorithm = SmtAlphaSearch(initial_values, rnn_start_idxs, rnn_output_idxs,
                               np.array([[1]]), np.array([[1]]), [0],
                               [xlim[0]], [xlim[1]], num_iterations)
    assert prove_multidim_property(network, rnn_start_idxs, rnn_output_idxs,
                                   property_eqs, algorithm)
Example #7
def test_adversarial_robustness_two_inputs_two_hidden_SGDAlgorithm():
    '''
    This example has 2 input nodes and two RNN cells
    '''
    num_iterations = 10
    xlim = [(0, 1), (1, 2)]

    network, rnn_output_idxs, initial_values, property_eq = \
        define_adversarial_robustness_two_input_nodes_two_hidden(xlim, num_iterations)
    network.dump()
    rnn_start_idxs = [i - 3 for i in rnn_output_idxs]
    algorithm = IterateAlphasSGD(initial_values, rnn_start_idxs,
                                 rnn_output_idxs)
    assert prove_multidim_property(network, rnn_start_idxs, rnn_output_idxs,
                                   property_eq, algorithm)
Example #8
    def prove_out_max_property(self, out_idx, max_value):
        '''
        Prove a maximum-value property on one entry of the network's output vector.
        :param out_idx: index in the 10-entry output vector
        :param max_value: maximum value for that index
        :return: True / False
        '''
        raise NotImplementedError
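        # NOTE: everything below the raise is unreachable and refers to names
        # (img_patch, n, w_out, network, rnn_start_idxs, rnn_output_idxs) that are not defined in this scope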
        self._calc_output_initial_values(img_patch, n)
        property_eq = MarabouCore.Equation(MarabouCore.Equation.GE)
        property_eq.addAddend(w_out[0], out_idx)
        property_eq.setScalar(max_value)

        assert prove_multidim_property(network, rnn_start_idxs,
                                       rnn_output_idxs,
                                       self.output_initial_values,
                                       [property_eq])
Example #9
def test_auto_adversarial_robustness_two_inputs_SGDAlgorithm():
    '''
    This example has 2 input nodes and two RNN cells
    '''
    from RNN.MarabouRNNMultiDim import prove_multidim_property
    from rnn_algorithms.IterateAlphasSGD import IterateAlphasSGD

    num_iterations = 10
    xlim = [(0, 1), (1, 2)]

    network, rnn_start_idxs, property_eq, initial_values, *_ = define_adversarial_robustness_two_input_nodes(xlim, num_iterations)
    rnn_invariant_type = [MarabouCore.Equation.GE, MarabouCore.Equation.LE]
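    # note: rnn_invariant_type is defined but not used in the rest of this test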

    # network.dump()
    rnn_output_idxs = [i + 3 for i in rnn_start_idxs]
    algorithm = IterateAlphasSGD(initial_values, rnn_start_idxs, rnn_output_idxs)
    assert prove_multidim_property(network, rnn_start_idxs, rnn_output_idxs, property_eq, algorithm)
Example #10
    rnn_output_idxs = add_rnn_cells(network, w_in, w_h, b_h, n)
    rnn_start_idxs = [i - 3 for i in rnn_output_idxs]

    w_out = np.array([1, 0])
    property_eq = MarabouCore.Equation(MarabouCore.Equation.GE)
    property_eq.addAddend(w_out[0], rnn_output_idxs[0])
    property_eq.setScalar(23.1)

    # draw_r_values(*calc_rnn_values(x_max, w_in, w_h, n))
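    # NOTE: w_in is reassigned here although it was already passed to add_rnn_cells above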
    w_in = np.array([w_in_0, w_in_1])
    r_min, r_max = calc_min_max_values(x_min, x_max, w_in,
                                       np.zeros(w_in.shape))
    initial_values = [r_min, r_max]
    y, rs = get_max_value([x_min, x_max], w_in, w_h, b_h, w_out, 0, n)
    print("network max value: {}\nrecurrent max values: {}".format(y, rs))
    assert prove_multidim_property(network, rnn_start_idxs, rnn_output_idxs,
                                   initial_values, [property_eq])
    print('property proved')
    #
    # # print("rnn_start_idxs:", rnn_start_idxs)
    # output_idx = add_output_equations(network, rnn_output_idxs, np.array([w_out_0, w_out_1]).T, np.array([0.3]))
    # # network.dump()
    # # print("output idx:", output_idx)
    #
    # # Let's verify that the output node is less then 100, after 10 iterations
    # n = 10
    # property_eq = MarabouCore.Equation(MarabouCore.Equation.LE)
    # property_eq.addAddend(1, output_idx[0])
    # property_eq.setScalar(100)
    #
    # # network, rnn_start_idxs, rnn_invariant_type, initial_values, n_iterations,
    # # property_equations, min_alphas = None, max_alphas = None, rnn_dependent = None