def test_get_min_eig_vec_proxy(self):
        # Test computing the minimum eigenvalue using matrix-vector products.
        dual_formulation_object = self.prepare_dual_object()
        _, matrix_m = dual_formulation_object.get_full_psd_matrix()
        optimization_params = {'init_learning_rate': 0.1,
                               'learning_rate_decay': 0.9,
                               'eig_num_iter': 2000,
                               'eig_learning_rate': 0.01,
                               'init_smooth': 0.0,
                               'smooth_decay': 0.9}
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            optimization_object = optimization.Optimization(dual_formulation_object,
                                                            sess, optimization_params)
            eig_vec = optimization_object.get_min_eig_vec_proxy()
            tf_eig_vec = optimization_object.get_min_eig_vec_proxy(use_tf_eig=True)
            self.assertIsNotNone(eig_vec)

            # Running the graphs and checking that the minimum eigenvalue is correct
            # ** No smoothing
            tf_eig_vec_val, eig_vec_val, matrix_m_val = sess.run(
                [tf_eig_vec, eig_vec, matrix_m],
                feed_dict={optimization_object.eig_init_vec_placeholder:
                               np.random.rand(6, 1),
                           optimization_object.eig_num_iter_placeholder: 2000,
                           optimization_object.smooth_placeholder: 0.0})

            # Eigenvalue corresponding to v is v^\top M v
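            # (Strictly, v^T M v equals an eigenvalue only when v has unit norm;
            # the eigenvector routines here are assumed to return normalized v.)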
            eig_val = np.matmul(np.transpose(eig_vec_val),
                                np.matmul(matrix_m_val, eig_vec_val))
            tf_eig_val = np.matmul(np.transpose(tf_eig_vec_val),
                                   np.matmul(matrix_m_val, tf_eig_vec_val))
            [np_eig_values, _] = np.linalg.eig(matrix_m_val)
            self.assertLess(np.abs(np.min(np_eig_values) - eig_val), 1E-5)
            self.assertLess(np.abs(np.min(np_eig_values) - tf_eig_val), 1E-5)

            # Running the graphs and checking that the minimum eigenvalue is correct
            # ** Smoothing
            optimization_params['init_smooth'] = 0.0001
            optimization_object = optimization.Optimization(dual_formulation_object,
                                                            sess, optimization_params)
            eig_vec = optimization_object.get_min_eig_vec_proxy()
            tf_eig_vec = optimization_object.get_min_eig_vec_proxy(use_tf_eig=True)

            tf_eig_vec_val, eig_vec_val, matrix_m_val = sess.run(
                [tf_eig_vec, eig_vec, matrix_m],
                feed_dict={optimization_object.eig_init_vec_placeholder:
                               np.random.rand(6, 1),
                           optimization_object.smooth_placeholder: 0.1,
                           optimization_object.eig_num_iter_placeholder: 2000})

            # Eigenvalue corresponding to v is v^\top M v
            eig_val = np.matmul(np.transpose(eig_vec_val),
                                np.matmul(matrix_m_val, eig_vec_val))
            tf_eig_val = np.matmul(np.transpose(tf_eig_vec_val),
                                   np.matmul(matrix_m_val, tf_eig_vec_val))
            [np_eig_values, _] = np.linalg.eig(matrix_m_val)
            self.assertLess(np.abs(np.min(np_eig_values) - eig_val), 1E-5)
            # In general, smoothed version can be far off
            self.assertLess(np.abs(np.min(np_eig_values) - tf_eig_val), 1E-1)
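For reference, the check this test performs can be reproduced in plain NumPy: for a symmetric matrix M and a unit-norm eigenvector v of its smallest eigenvalue, the Rayleigh quotient v^T M v recovers that eigenvalue. A minimal self-contained sketch (the 6x6 size mirrors the (6, 1) init vector above; the matrix itself is made up for illustration):

import numpy as np

# Random symmetric 6x6 matrix, standing in for matrix_m above.
rng = np.random.RandomState(0)
a = rng.rand(6, 6)
matrix_m = (a + a.T) / 2.0

# For symmetric matrices, np.linalg.eigh returns eigenvalues in ascending
# order, so index 0 gives the minimum eigenvalue and its eigenvector.
eig_values, eig_vectors = np.linalg.eigh(matrix_m)
v = eig_vectors[:, [0]]  # unit-norm column vector

# Rayleigh quotient v^T M v equals the smallest eigenvalue for unit-norm v.
rayleigh = np.matmul(np.transpose(v), np.matmul(matrix_m, v)).item()
assert abs(rayleigh - eig_values[0]) < 1e-10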
Example #2
def test_optimization(self):
    # Function to test optimization.
    dual_formulation_object = self.prepare_dual_object()
    optimization_params = {
        'init_penalty': 10000,
        'large_eig_num_steps': 1000,
        'small_eig_num_steps': 500,
        'inner_num_steps': 10,
        'outer_num_steps': 2,
        'beta': 2,
        'smoothness_parameter': 0.001,
        'eig_learning_rate': 0.01,
        'optimizer': 'adam',
        'init_learning_rate': 0.1,
        'learning_rate_decay': 0.9,
        'momentum_parameter': 0.9,
        'print_stats_steps': 1,
        'stats_folder': None,
        'projection_steps': 200
    }
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        optimization_object = optimization.Optimization(
            dual_formulation_object, sess, optimization_params)
        is_cert_found = optimization_object.run_optimization()
        self.assertFalse(is_cert_found)
Example #3
def test_optimization(self):
    """Function to test optimization."""
    sess, dual_formulation_object = self.prepare_dual_object()
    optimization_params = {
        "init_penalty": 10000,
        "large_eig_num_steps": 1000,
        "small_eig_num_steps": 500,
        "inner_num_steps": 10,
        "outer_num_steps": 2,
        "beta": 2,
        "smoothness_parameter": 0.001,
        "eig_learning_rate": 0.01,
        "optimizer": "adam",
        "init_learning_rate": 0.1,
        "learning_rate_decay": 0.9,
        "momentum_parameter": 0.9,
        "print_stats_steps": 1,
        "stats_folder": None,
        "projection_steps": 200,
        "eig_type": "TF",
    }
    sess.run(tf.global_variables_initializer())
    optimization_object = optimization.Optimization(
        dual_formulation_object, sess, optimization_params
    )
    is_cert_found = optimization_object.run_optimization()
    self.assertFalse(is_cert_found)
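prepare_dual_object itself is not shown in any of these snippets. A plausible sketch of what it might do, assembled only from calls that appear in the main() examples below; the shapes, paths, classes, epsilon, and the None initial dual file are all placeholder assumptions, and in the tests it would be a method on the test class:

def prepare_dual_object():
    # Hypothetical helper mirroring the setup in Example #8's main() below.
    input_shape = [28, 28, 1]  # placeholder input shape
    nn_params = nn.load_network_from_checkpoint(
        'model.ckpt', 'model.json', input_shape)  # placeholder paths
    dual_var = utils.initialize_dual(nn_params, None, init_nu=1000.0)
    test_input = np.zeros((np.prod(input_shape), 1))  # placeholder input
    lzs_params = {'min_iter': 5, 'max_iter': 50}
    sess = tf.Session()
    dual = dual_formulation.DualFormulation(
        sess, dual_var, nn_params, test_input, 0, 1,  # true class, adv class
        0.0, 1.0, 0.1, lzs_params)  # input min/max and epsilon (placeholders)
    return sess, dual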
Example #4
def main(_):
    net_weights, net_biases, net_layer_types = read_weights.read_weights(
        FLAGS.checkpoint, FLAGS.model_json)
    nn_params = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                  net_layer_types)
    print(nn_params.sizes)
    dual_var = utils.initialize_dual(nn_params,
                                     FLAGS.init_dual_file,
                                     init_nu=FLAGS.init_nu)
    # Reading test input and reshaping
    with tf.gfile.Open(FLAGS.test_input) as f:
        test_input = np.load(f)
    test_input = np.reshape(test_input, [np.size(test_input), 1])
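    # e.g. a (28, 28) test image becomes a (784, 1) column vector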

    if FLAGS.adv_class == -1:
        start_class = 0
        end_class = FLAGS.num_classes
    else:
        start_class = FLAGS.adv_class
        end_class = FLAGS.adv_class + 1
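    # adv_class == -1 sweeps every possible adversarial class; otherwise only
    # the single requested class is certified (the true class is skipped below).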
    for adv_class in range(start_class, end_class):
        print('Adv class', adv_class)
        if adv_class == FLAGS.true_class:
            continue
        dual = dual_formulation.DualFormulation(dual_var, nn_params,
                                                test_input, FLAGS.true_class,
                                                adv_class, FLAGS.input_minval,
                                                FLAGS.input_maxval,
                                                FLAGS.epsilon)
        dual.set_differentiable_objective()
        dual.get_full_psd_matrix()
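        # Presumably materializes the dense matrix M used by the eigenvalue
        # computations (an assumption; cf. the matrix_m checks in the tests above).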
        optimization_params = {
            'init_penalty': FLAGS.init_penalty,
            'large_eig_num_steps': FLAGS.large_eig_num_steps,
            'small_eig_num_steps': FLAGS.small_eig_num_steps,
            'inner_num_steps': FLAGS.inner_num_steps,
            'outer_num_steps': FLAGS.outer_num_steps,
            'beta': FLAGS.beta,
            'smoothness_parameter': FLAGS.smoothness_parameter,
            'eig_learning_rate': FLAGS.eig_learning_rate,
            'optimizer': FLAGS.optimizer,
            'init_learning_rate': FLAGS.init_learning_rate,
            'learning_rate_decay': FLAGS.learning_rate_decay,
            'momentum_parameter': FLAGS.momentum_parameter,
            'print_stats_steps': FLAGS.print_stats_steps,
            'stats_folder': FLAGS.stats_folder,
            'projection_steps': FLAGS.projection_steps
        }
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            optimization_object = optimization.Optimization(
                dual, sess, optimization_params)
            optimization_object.prepare_one_step()
            is_cert_found = optimization_object.run_optimization()
            if not is_cert_found:
                print('Current example could not be verified')
                exit()
    print('Example successfully verified')
Example #5
def test_init(self):
    # Function to test initialization of OptimizationTest.
    dual_formulation_object = self.prepare_dual_object()
    dual_formulation_object.set_differentiable_objective()
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        optimization_params = {'init_learning_rate': 0.1,
                               'learning_rate_decay': 0.9,
                               'eig_num_iter': 10,
                               'eig_learning_rate': 0.01,
                               'init_smooth': 0.5,
                               'smooth_decay': 0.9}
        optimization_object = optimization.Optimization(dual_formulation_object,
                                                        sess, optimization_params)
        self.assertIsNotNone(optimization_object)
Example #6
def test_init(self):
    # Function to test initialization of OptimizationTest.
    sess, dual_formulation_object = self.prepare_dual_object()
    dual_formulation_object.set_differentiable_objective()
    sess.run(tf.global_variables_initializer())
    optimization_params = {
        'init_learning_rate': 0.1,
        'learning_rate_decay': 0.9,
        'eig_num_iter': 10,
        'eig_learning_rate': 0.01,
        'init_smooth': 0.5,
        'smooth_decay': 0.9,
        'inner_num_steps': 10,
        'optimizer': 'adam',
        'momentum_parameter': 0.9,
        'eig_type': 'TF'
    }
    optimization_object = optimization.Optimization(
        dual_formulation_object, sess, optimization_params)
    self.assertIsNotNone(optimization_object)
Example #7
def test_init(self):
    """Function to test initialization of OptimizationTest."""
    sess, dual_formulation_object = self.prepare_dual_object()
    dual_formulation_object.set_differentiable_objective()
    sess.run(tf.global_variables_initializer())
    optimization_params = {
        "init_learning_rate": 0.1,
        "learning_rate_decay": 0.9,
        "eig_num_iter": 10,
        "eig_learning_rate": 0.01,
        "init_smooth": 0.5,
        "smooth_decay": 0.9,
        "inner_num_steps": 10,
        "optimizer": "adam",
        "momentum_parameter": 0.9,
        "eig_type": "TF",
    }
    optimization_object = optimization.Optimization(
        dual_formulation_object, sess, optimization_params
    )
    self.assertIsNotNone(optimization_object)
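The "TF" eig_type (like the use_tf_eig=True path in the tests above) presumably falls back to TensorFlow's dense symmetric eigendecomposition rather than an iterative solver; that is an assumption about Optimization's internals, but the TF1-era primitive involved would look like this:

import numpy as np
import tensorflow as tf

a = np.random.rand(6, 6)
matrix_m = tf.constant((a + a.T) / 2.0)

# tf.self_adjoint_eig returns eigenvalues in non-decreasing order, so
# column 0 of the eigenvector matrix belongs to the minimum eigenvalue.
eig_vals, eig_vecs = tf.self_adjoint_eig(matrix_m)
min_eig_vec = eig_vecs[:, 0:1]

with tf.Session() as sess:
    min_val, min_vec = sess.run([eig_vals[0], min_eig_vec])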
Example #8
def main(_):
    # pylint: disable=missing-docstring
    tf.logging.set_verbosity(FLAGS.verbosity)

    start_time = time.time()

    # Initialize neural network based on config files
    input_shape = [FLAGS.num_rows, FLAGS.num_columns, FLAGS.num_channels]
    nn_params = nn.load_network_from_checkpoint(FLAGS.checkpoint,
                                                FLAGS.model_json, input_shape)
    tf.logging.info('Loaded neural network with size of layers: %s',
                    nn_params.sizes)
    tf.logging.info('Loaded neural network with input shapes: %s',
                    nn_params.input_shapes)
    tf.logging.info('Loaded neural network with output shapes: %s',
                    nn_params.output_shapes)
    dual_var = utils.initialize_dual(nn_params,
                                     FLAGS.init_dual_file,
                                     init_nu=FLAGS.init_nu)

    # Reading test input and reshaping
    with tf.gfile.Open(FLAGS.test_input) as f:
        test_input = np.load(f)
    test_input = np.reshape(test_input, [np.size(test_input), 1])

    if FLAGS.adv_class == -1:
        start_class = 0
        end_class = FLAGS.num_classes
    else:
        start_class = FLAGS.adv_class
        end_class = FLAGS.adv_class + 1
    for adv_class in range(start_class, end_class):
        tf.logging.info('Running certification for adversarial class %d',
                        adv_class)
        if adv_class == FLAGS.true_class:
            continue

        optimization_params = {
            'init_penalty': FLAGS.init_penalty,
            'large_eig_num_steps': FLAGS.large_eig_num_steps,
            'small_eig_num_steps': FLAGS.small_eig_num_steps,
            'inner_num_steps': FLAGS.inner_num_steps,
            'outer_num_steps': FLAGS.outer_num_steps,
            'beta': FLAGS.beta,
            'smoothness_parameter': FLAGS.smoothness_parameter,
            'eig_learning_rate': FLAGS.eig_learning_rate,
            'optimizer': FLAGS.optimizer,
            'init_learning_rate': FLAGS.init_learning_rate,
            'learning_rate_decay': FLAGS.learning_rate_decay,
            'momentum_parameter': FLAGS.momentum_parameter,
            'print_stats_steps': FLAGS.print_stats_steps,
            'stats_folder': FLAGS.stats_folder,
            'projection_steps': FLAGS.projection_steps,
            'eig_type': FLAGS.eig_type,
            'has_conv': nn_params.has_conv,
            'lanczos_steps': FLAGS.lanczos_steps
        }
        lzs_params = {
            'min_iter': MIN_LANCZOS_ITER,
            'max_iter': FLAGS.lanczos_steps
        }
        with tf.Session() as sess:
            dual = dual_formulation.DualFormulation(
                sess, dual_var, nn_params, test_input, FLAGS.true_class,
                adv_class, FLAGS.input_minval, FLAGS.input_maxval,
                FLAGS.epsilon, lzs_params)
            optimization_object = optimization.Optimization(
                dual, sess, optimization_params)
            is_cert_found = optimization_object.run_optimization()
            if not is_cert_found:
                print('Example could not be verified')
                exit()
    print('Example successfully verified')
    print('Elapsed time: ' + str(time.time() - start_time))
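Example #8 also introduces lzs_params and FLAGS.lanczos_steps: the newer code estimates extreme eigenvalues with Lanczos iterations driven by matrix-vector products, so the full PSD matrix never has to be materialized. A bare-bones NumPy illustration of that idea (a sketch only; the real implementation lives inside DualFormulation and is not shown in these snippets):

import numpy as np

def lanczos_min_eig(matvec, dim, num_iter):
    """Illustrative Lanczos sketch: estimate the smallest eigenvalue of a
    symmetric operator given only v -> M @ v products."""
    q = np.random.rand(dim)
    q /= np.linalg.norm(q)
    q_prev = np.zeros(dim)
    alphas, betas = [], []
    beta = 0.0
    for _ in range(min(num_iter, dim)):
        w = matvec(q) - beta * q_prev
        alpha = np.dot(q, w)
        alphas.append(alpha)
        w -= alpha * q
        beta = np.linalg.norm(w)
        if beta < 1e-12:  # Krylov subspace is invariant; stop early
            break
        betas.append(beta)
        q_prev, q = q, w / beta
    # Eigenvalues of the small tridiagonal matrix approximate the extreme
    # eigenvalues of M; index 0 is the smallest.
    k = len(alphas)
    t = np.diag(alphas) + np.diag(betas[:k - 1], 1) + np.diag(betas[:k - 1], -1)
    return np.linalg.eigvalsh(t)[0]

# Sanity check against a dense solve on a small made-up matrix.
a = np.random.rand(6, 6)
m = (a + a.T) / 2.0
approx = lanczos_min_eig(lambda v: np.matmul(m, v), 6, 6)
exact = np.linalg.eigvalsh(m)[0]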
Example #9
def test_get_min_eig_vec_proxy(self):
    """Test computing the minimum eigenvalue using matrix-vector products."""
    sess, dual_formulation_object = self.prepare_dual_object()
    _, matrix_m = dual_formulation_object.get_full_psd_matrix()
    optimization_params = {
        "init_learning_rate": 0.1,
        "learning_rate_decay": 0.9,
        "eig_num_iter": 2000,
        "eig_learning_rate": 0.01,
        "init_smooth": 0.0,
        "smooth_decay": 0.9,
        "inner_num_steps": 10,
        "optimizer": "adam",
        "momentum_parameter": 0.9,
        "eig_type": "TF",
    }
    sess.run(tf.global_variables_initializer())
    optimization_object = optimization.Optimization(
        dual_formulation_object, sess, optimization_params
    )
    eig_vec = optimization_object.get_min_eig_vec_proxy()
    tf_eig_vec = optimization_object.get_min_eig_vec_proxy(use_tf_eig=True)
    self.assertIsNotNone(eig_vec)

    # Running the graphs and checking that the minimum eigenvalue is correct
    # ** No smoothing
    tf_eig_vec_val, eig_vec_val, matrix_m_val = sess.run(
        [tf_eig_vec, eig_vec, matrix_m],
        feed_dict={
            optimization_object.eig_init_vec_placeholder: np.random.rand(6, 1),
            optimization_object.eig_num_iter_placeholder: 2000,
            optimization_object.smooth_placeholder: 0.0,
        },
    )

    # Eigenvalue corresponding to v is v^\top M v
    eig_val = np.matmul(
        np.transpose(eig_vec_val), np.matmul(matrix_m_val, eig_vec_val)
    )
    tf_eig_val = np.matmul(
        np.transpose(tf_eig_vec_val), np.matmul(matrix_m_val, tf_eig_vec_val)
    )
    [np_eig_values, _] = np.linalg.eig(matrix_m_val)
    self.assertLess(np.abs(np.min(np_eig_values) - eig_val), 1e-5)
    self.assertLess(np.abs(np.min(np_eig_values) - tf_eig_val), 1e-5)

    # Running the graphs and checking that the minimum eigenvalue is correct
    # ** Smoothing
    optimization_params["init_smooth"] = 0.0001
    optimization_object = optimization.Optimization(
        dual_formulation_object, sess, optimization_params
    )
    eig_vec = optimization_object.get_min_eig_vec_proxy()
    tf_eig_vec = optimization_object.get_min_eig_vec_proxy(use_tf_eig=True)

    tf_eig_vec_val, eig_vec_val, matrix_m_val = sess.run(
        [tf_eig_vec, eig_vec, matrix_m],
        feed_dict={
            optimization_object.eig_init_vec_placeholder: np.random.rand(6, 1),
            optimization_object.smooth_placeholder: 0.1,
            optimization_object.eig_num_iter_placeholder: 2000,
        },
    )

    # Eigenvalue corresponding to v is v^\top M v
    eig_val = np.matmul(
        np.transpose(eig_vec_val), np.matmul(matrix_m_val, eig_vec_val)
    )
    tf_eig_val = np.matmul(
        np.transpose(tf_eig_vec_val), np.matmul(matrix_m_val, tf_eig_vec_val)
    )
    [np_eig_values, _] = np.linalg.eig(matrix_m_val)
    self.assertLess(np.abs(np.min(np_eig_values) - eig_val), 1e-5)
    # In general, smoothed version can be far off
    self.assertLess(np.abs(np.min(np_eig_values) - tf_eig_val), 1e-1)