Example #1
def main(_):
    net_weights, net_biases, net_layer_types = read_weights.read_weights(
        FLAGS.checkpoint, FLAGS.model_json)
    nn_params = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                  net_layer_types)
    print(nn_params.sizes)
    dual_var = utils.initialize_dual(nn_params,
                                     FLAGS.init_dual_file,
                                     init_nu=FLAGS.init_nu)
    # Reading test input and reshaping
    with tf.gfile.Open(FLAGS.test_input) as f:
        test_input = np.load(f)
    test_input = np.reshape(test_input, [np.size(test_input), 1])

    if FLAGS.adv_class == -1:
        start_class = 0
        end_class = FLAGS.num_classes
    else:
        start_class = FLAGS.adv_class
        end_class = FLAGS.adv_class + 1
    for adv_class in range(start_class, end_class):
        print('Adv class', adv_class)
        if adv_class == FLAGS.true_class:
            continue
        dual = dual_formulation.DualFormulation(dual_var, nn_params,
                                                test_input, FLAGS.true_class,
                                                adv_class, FLAGS.input_minval,
                                                FLAGS.input_maxval,
                                                FLAGS.epsilon)
        dual.set_differentiable_objective()
        dual.get_full_psd_matrix()
        optimization_params = {
            'init_penalty': FLAGS.init_penalty,
            'large_eig_num_steps': FLAGS.large_eig_num_steps,
            'small_eig_num_steps': FLAGS.small_eig_num_steps,
            'inner_num_steps': FLAGS.inner_num_steps,
            'outer_num_steps': FLAGS.outer_num_steps,
            'beta': FLAGS.beta,
            'smoothness_parameter': FLAGS.smoothness_parameter,
            'eig_learning_rate': FLAGS.eig_learning_rate,
            'optimizer': FLAGS.optimizer,
            'init_learning_rate': FLAGS.init_learning_rate,
            'learning_rate_decay': FLAGS.learning_rate_decay,
            'momentum_parameter': FLAGS.momentum_parameter,
            'print_stats_steps': FLAGS.print_stats_steps,
            'stats_folder': FLAGS.stats_folder,
            'projection_steps': FLAGS.projection_steps
        }
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            optimization_object = optimization.Optimization(
                dual, sess, optimization_params)
            optimization_object.prepare_one_step()
            is_cert_found = optimization_object.run_optimization()
            if not is_cert_found:
                print('Current example could not be verified')
                exit()
    print('Example successfully verified')
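The driver above pulls every setting from FLAGS and assumes the project's modules (read_weights, neural_net_params, utils, dual_formulation, optimization) plus numpy and tensorflow are already imported. For orientation only, here is a minimal sketch of how a few of those flags could be declared with the TF1-era tf.app.flags API; the flag names come from the snippet itself, but the defaults and help strings are placeholders, not the values used by the original script.

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

# Flag names mirror those referenced in main(); defaults are placeholders.
flags.DEFINE_string('checkpoint', None, 'Path to the model checkpoint.')
flags.DEFINE_string('model_json', None, 'Path to the model architecture JSON.')
flags.DEFINE_string('test_input', None, 'Path to the saved test input (.npy).')
flags.DEFINE_string('init_dual_file', None, 'Optional file with initial dual variables.')
flags.DEFINE_integer('true_class', 0, 'True class of the test input.')
flags.DEFINE_integer('adv_class', -1, 'Adversarial class to verify, or -1 for all classes.')
flags.DEFINE_integer('num_classes', 10, 'Number of output classes.')
flags.DEFINE_float('input_minval', 0.0, 'Smallest allowed input value.')
flags.DEFINE_float('input_maxval', 1.0, 'Largest allowed input value.')
flags.DEFINE_float('epsilon', 0.1, 'Size of the input perturbation.')

if __name__ == '__main__':
    tf.app.run(main)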
Example #2
    def prepare_dual_object(self):
        # Function to prepare dual object to be used for testing optimization.
        net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
        net_biases = [np.transpose(np.matrix([0, 0, 0])),
                      np.transpose(np.matrix([0, 0]))]
        net_layer_types = ['ff_relu', 'ff']
        nn_params1 = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                       net_layer_types)

        test_input = np.transpose(np.matrix([0, 0]))
        true_class = 0
        adv_class = 1
        input_minval = 0
        input_maxval = 0
        epsilon = 0.1

        # Creating dual variables to use for optimization
        lambda_pos = [tf.get_variable('lambda_pos0',
                                      initializer=np.random.uniform(
                                          0, 0.1, size=(2, 1)).astype(np.float32)),
                      tf.get_variable('lambda_pos1',
                                      initializer=np.random.uniform(
                                          0, 0.1, size=(3, 1)).astype(np.float32))]
        lambda_neg = [tf.get_variable('lambda_neg0',
                                      initializer=np.random.uniform(
                                          0, 0.1, size=(2, 1)).astype(np.float32)),
                      tf.get_variable('lambda_neg1',
                                      initializer=np.random.uniform(
                                          0, 0.1, size=(3, 1)).astype(np.float32))]
        lambda_quad = [tf.get_variable('lambda_quad0',
                                       initializer=np.random.uniform(
                                           0, 0.1, size=(2, 1)).astype(np.float32)),
                       tf.get_variable('lambda_quad1',
                                       initializer=np.random.uniform(
                                           0, 0.1, size=(3, 1)).astype(np.float32))]
        lambda_lu = [tf.get_variable('lambda_lu0',
                                     initializer=np.random.uniform(
                                         0, 0.1, size=(2, 1)).astype(np.float32)),
                     tf.get_variable('lambda_lu1',
                                     initializer=np.random.uniform(
                                         0, 0.1, size=(3, 1)).astype(np.float32))]
        nu = tf.reshape(tf.get_variable('nu', initializer=200.0,
                                        dtype=tf.float32), shape=(1, 1))
        dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg,
                    'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}
        dual_formulation_object = dual_formulation.DualFormulation(dual_var,
                                                                   nn_params1,
                                                                   test_input,
                                                                   true_class,
                                                                   adv_class,
                                                                   input_minval,
                                                                   input_maxval,
                                                                   epsilon)
        return dual_formulation_object
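For context, a hypothetical test method built on the helper above could reuse it and then check the same shapes that the later examples assert; the network defined in prepare_dual_object has layer sizes [2, 3, 2], matching Examples #3 and #7. This is an illustrative sketch, not part of the original test class.

    def test_prepare_dual_object(self):
        # Illustrative sketch: build the dual object via the helper above and
        # verify the PSD matrix shapes, mirroring the assertions in Example #7.
        dual = self.prepare_dual_object()
        dual.set_differentiable_objective()
        matrix_h, matrix_m = dual.get_full_psd_matrix()
        self.assertEqual(matrix_h.shape.as_list(), [5, 5])
        self.assertEqual(matrix_m.shape.as_list(), [6, 6])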
Example #3
    def test_get_psd_product(self):
        # Function to test implicit product with PSD matrix.
        net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
        net_biases = [
            np.transpose(np.matrix([0, 0, 0])),
            np.transpose(np.matrix([0, 0]))
        ]
        net_layer_types = ['ff_relu', 'ff']
        nn_params1 = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                       net_layer_types)

        test_input = np.transpose(np.matrix([0, 0]))
        true_class = 0
        adv_class = 1
        input_minval = 0
        input_maxval = 0
        epsilon = 0.1
        three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
        two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
        scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
        lambda_pos = [two_dim_tensor, three_dim_tensor]
        lambda_neg = lambda_pos
        lambda_quad = lambda_pos
        lambda_lu = lambda_pos
        nu = scalar
        dual_var = {
            'lambda_pos': lambda_pos,
            'lambda_neg': lambda_neg,
            'lambda_quad': lambda_quad,
            'lambda_lu': lambda_lu,
            'nu': nu
        }
        dual_formulation_object = dual_formulation.DualFormulation(
            dual_var, nn_params1, test_input, true_class, adv_class,
            input_minval, input_maxval, epsilon)
        _, matrix_m = dual_formulation_object.get_full_psd_matrix()

        # Testing if the values match
        six_dim_tensor = tf.random_uniform(shape=(6, 1), dtype=tf.float32)
        implicit_product = dual_formulation_object.get_psd_product(
            six_dim_tensor)
        explicit_product = tf.matmul(matrix_m, six_dim_tensor)
        with tf.Session() as sess:
            [implicit_product_value, explicit_product_value
             ] = sess.run([implicit_product, explicit_product])
            self.assertEqual(np.shape(implicit_product_value),
                             np.shape(explicit_product_value))
            self.assertLess(
                np.max(np.abs(implicit_product_value -
                              explicit_product_value)), 1E-5)
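The check above follows a common pattern for testing a matrix-free (implicit) operator: apply it to a random vector and compare the result against an explicit matrix product. A tiny self-contained NumPy illustration of the same idea, unrelated to the dual formulation itself:

import numpy as np

def implicit_product(vec):
    # Matrix-free version of M = [[2, 1], [1, 3]]: computed without forming M.
    return np.array([2.0 * vec[0] + vec[1], vec[0] + 3.0 * vec[1]])

matrix_m = np.array([[2.0, 1.0], [1.0, 3.0]])
vec = np.random.uniform(size=(2,))
assert np.max(np.abs(implicit_product(vec) - matrix_m @ vec)) < 1e-5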
Example #4
    def test_init(self):
        # Function to test initialization of NeuralNetParams object.
        # Valid params
        net_weights = [[[2, 2], [3, 3], [4, 4]], [1, 1, 1]]
        net_biases = [
            np.transpose(np.matrix([0, 0, 0])),
            np.transpose(np.matrix([0, 0]))
        ]
        net_layer_types = ['ff_relu', 'ff']
        nn_params1 = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                       net_layer_types)
        self.assertIsNotNone(nn_params1)
        # Invalid params: list length
        net_biases = [0]
        with self.assertRaises(ValueError):
            neural_net_params.NeuralNetParams(net_weights, net_biases,
                                              net_layer_types)

        # Invalid params: layer types
        with self.assertRaises(ValueError):
            net_layer_types = ['ff_relu', 'ff_relu']
            neural_net_params.NeuralNetParams(net_weights, net_biases,
                                              net_layer_types)
Example #5
    def test_set_differentiable_objective(self):
        # Function to test the function that sets the differentiable objective.
        net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
        net_biases = [
            np.transpose(np.matrix([0, 0, 0])),
            np.transpose(np.matrix([0, 0]))
        ]
        net_layer_types = ['ff_relu', 'ff']
        nn_params1 = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                       net_layer_types)

        test_input = np.transpose(np.matrix([0, 0]))
        true_class = 0
        adv_class = 1
        input_minval = 0
        input_maxval = 0
        epsilon = 0.1
        three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
        two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
        scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
        lambda_pos = [two_dim_tensor, three_dim_tensor]
        lambda_neg = lambda_pos
        lambda_quad = lambda_pos
        lambda_lu = lambda_pos
        nu = scalar
        dual_var = {
            'lambda_pos': lambda_pos,
            'lambda_neg': lambda_neg,
            'lambda_quad': lambda_quad,
            'lambda_lu': lambda_lu,
            'nu': nu
        }
        dual_formulation_object = dual_formulation.DualFormulation(
            dual_var, nn_params1, test_input, true_class, adv_class,
            input_minval, input_maxval, epsilon)
        dual_formulation_object.set_differentiable_objective()
        self.assertEqual(dual_formulation_object.scalar_f.shape.as_list(), [1])
        self.assertEqual(
            dual_formulation_object.unconstrained_objective.shape.as_list(),
            [1, 1])
        self.assertEqual(dual_formulation_object.vector_g.shape.as_list(),
                         [5, 1])
Example #6
    def test_forward_pass(self):
        # Function to test forward pass of nn_params.
        net_weights = [[[2, 2], [3, 3], [4, 4]], [1, 1, 1]]
        net_biases = [
            np.transpose(np.matrix([0, 0, 0])),
            np.transpose(np.matrix([0, 0]))
        ]
        net_layer_types = ['ff_relu', 'ff']
        nn_params = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                      net_layer_types)
        input_vector = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
        output_vector = nn_params.forward_pass(input_vector, 0)
        self.assertEqual(output_vector.shape.as_list(), [3, 1])
        output_vector_2 = nn_params.forward_pass(input_vector, 0, is_abs=True)
        self.assertEqual(output_vector_2.shape.as_list(), [3, 1])
        input_vector_trans = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
        output_vector_3 = nn_params.forward_pass(input_vector_trans,
                                                 0,
                                                 is_transpose=True)
        self.assertEqual(output_vector_3.shape.as_list(), [2, 1])
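Judging from the asserted shapes alone, the first-layer weight matrix here is 3x2, so the three calls map a (2, 1) input to (3, 1), do the same with (presumably) elementwise-absolute weights when is_abs=True, and map a (3, 1) vector back to (2, 1) via the transpose when is_transpose=True. Below is a hedged NumPy sketch of that bookkeeping; the exact semantics of is_abs and is_transpose are inferred from the shapes, not confirmed by the snippet.

import numpy as np

w0 = np.array([[2, 2], [3, 3], [4, 4]], dtype=np.float32)  # first-layer weights, shape (3, 2)
x = np.random.uniform(size=(2, 1)).astype(np.float32)
y = np.random.uniform(size=(3, 1)).astype(np.float32)

print((w0 @ x).shape)          # (3, 1) -- plain forward pass
print((np.abs(w0) @ x).shape)  # (3, 1) -- assumed effect of is_abs=True
print((w0.T @ y).shape)        # (2, 1) -- assumed effect of is_transpose=True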
Example #7
    def test_get_full_psd_matrix(self):
        # Function to test product with PSD matrix.
        net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
        net_biases = [
            np.transpose(np.matrix([0, 0, 0])),
            np.transpose(np.matrix([0, 0]))
        ]
        net_layer_types = ['ff_relu', 'ff']
        nn_params1 = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                       net_layer_types)

        test_input = np.transpose(np.matrix([0, 0]))
        true_class = 0
        adv_class = 1
        input_minval = 0
        input_maxval = 0
        epsilon = 0.1
        three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
        two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
        scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
        lambda_pos = [two_dim_tensor, three_dim_tensor]
        lambda_neg = lambda_pos
        lambda_quad = lambda_pos
        lambda_lu = lambda_pos
        nu = scalar
        dual_var = {
            'lambda_pos': lambda_pos,
            'lambda_neg': lambda_neg,
            'lambda_quad': lambda_quad,
            'lambda_lu': lambda_lu,
            'nu': nu
        }
        dual_formulation_object = dual_formulation.DualFormulation(
            dual_var, nn_params1, test_input, true_class, adv_class,
            input_minval, input_maxval, epsilon)
        matrix_h, matrix_m = dual_formulation_object.get_full_psd_matrix()
        self.assertEqual(matrix_h.shape.as_list(), [5, 5])
        self.assertEqual(matrix_m.shape.as_list(), [6, 6])
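The asserted shapes line up with the layer sizes of the test network: with sizes [2, 3, 2], matrix_h appears to cover the input and hidden activations (2 + 3 = 5), and matrix_m appears to add one extra row and column for nu. A small sanity-check of that bookkeeping follows; the interpretation of the extra dimension is an assumption.

layer_sizes = [2, 3, 2]          # input, hidden, output sizes of the test network
h_dim = sum(layer_sizes[:-1])    # 2 + 3 = 5, matching matrix_h of shape [5, 5]
m_dim = h_dim + 1                # 6, matching matrix_m of shape [6, 6]
print(h_dim, m_dim)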
Example #8
    def test_init(self):
        # Function to test initialization of dual formulation class.
        net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
        net_biases = [
            np.transpose(np.matrix([0, 0, 0])),
            np.transpose(np.matrix([0, 0]))
        ]
        net_layer_types = ['ff_relu', 'ff']
        nn_params1 = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                       net_layer_types)

        test_input = np.transpose(np.matrix([0, 0]))
        true_class = 0
        adv_class = 1
        input_minval = 0
        input_maxval = 0
        epsilon = 0.1
        three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
        two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
        scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
        lambda_pos = [two_dim_tensor, three_dim_tensor]
        lambda_neg = lambda_pos
        lambda_quad = lambda_pos
        lambda_lu = lambda_pos
        nu = scalar
        dual_var = {
            'lambda_pos': lambda_pos,
            'lambda_neg': lambda_neg,
            'lambda_quad': lambda_quad,
            'lambda_lu': lambda_lu,
            'nu': nu
        }
        dual_formulation_object = dual_formulation.DualFormulation(
            dual_var, nn_params1, test_input, true_class, adv_class,
            input_minval, input_maxval, epsilon)
        self.assertIsNotNone(dual_formulation_object)