Example #1
import sys

import numpy as np
import tensorflow as tf

# The certification helpers below are assumed to live in the cleverhans
# experimental certification package; adjust the import paths to match your
# checkout.
from cleverhans.experimental.certification import dual_formulation
from cleverhans.experimental.certification import neural_net_params
from cleverhans.experimental.certification import optimization
from cleverhans.experimental.certification import read_weights
from cleverhans.experimental.certification import utils

# FLAGS is defined at module level in the original script; see the flag sketch
# after this example for the definitions this function relies on.


def main(_):
    """Certifies the test input against every candidate adversarial class."""
    net_weights, net_biases, net_layer_types = read_weights.read_weights(
        FLAGS.checkpoint, FLAGS.model_json)
    nn_params = neural_net_params.NeuralNetParams(net_weights, net_biases,
                                                  net_layer_types)
    print(nn_params.sizes)
    dual_var = utils.initialize_dual(nn_params,
                                     FLAGS.init_dual_file,
                                     init_nu=FLAGS.init_nu)
    # Reading test input and reshaping
    with tf.gfile.Open(FLAGS.test_input) as f:
        test_input = np.load(f)
    test_input = np.reshape(test_input, [np.size(test_input), 1])

    # adv_class == -1 means certify against every class; otherwise only the
    # requested adversarial class is checked.
    if FLAGS.adv_class == -1:
        start_class = 0
        end_class = FLAGS.num_classes
    else:
        start_class = FLAGS.adv_class
        end_class = FLAGS.adv_class + 1
    for adv_class in range(start_class, end_class):
        print('Adv class', adv_class)
        # The true class itself does not need to be certified against.
        if adv_class == FLAGS.true_class:
            continue
        # Set up the dual formulation for this (true class, adversarial class)
        # pair; the objective and full PSD matrix are built explicitly below.
        dual = dual_formulation.DualFormulation(dual_var, nn_params,
                                                test_input, FLAGS.true_class,
                                                adv_class, FLAGS.input_minval,
                                                FLAGS.input_maxval,
                                                FLAGS.epsilon)
        dual.set_differentiable_objective()
        dual.get_full_psd_matrix()
        # Hyperparameters for the inner/outer optimization loops and the
        # eigenvalue computations, all taken from command-line flags.
        optimization_params = {
            'init_penalty': FLAGS.init_penalty,
            'large_eig_num_steps': FLAGS.large_eig_num_steps,
            'small_eig_num_steps': FLAGS.small_eig_num_steps,
            'inner_num_steps': FLAGS.inner_num_steps,
            'outer_num_steps': FLAGS.outer_num_steps,
            'beta': FLAGS.beta,
            'smoothness_parameter': FLAGS.smoothness_parameter,
            'eig_learning_rate': FLAGS.eig_learning_rate,
            'optimizer': FLAGS.optimizer,
            'init_learning_rate': FLAGS.init_learning_rate,
            'learning_rate_decay': FLAGS.learning_rate_decay,
            'momentum_parameter': FLAGS.momentum_parameter,
            'print_stats_steps': FLAGS.print_stats_steps,
            'stats_folder': FLAGS.stats_folder,
            'projection_steps': FLAGS.projection_steps
        }
        # Run the certification optimization for this adversarial class and
        # stop as soon as one class cannot be certified.
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            optimization_object = optimization.Optimization(
                dual, sess, optimization_params)
            optimization_object.prepare_one_step()
            is_cert_found = optimization_object.run_optimization()
            if not is_cert_found:
                print('Current example could not be verified')
                sys.exit()
    print('Example successfully verified')
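
Both examples read their entire configuration from FLAGS, which is defined at module level in the original script and not shown here. Below is a minimal sketch of the kind of flag definitions and entry point Example #1 assumes, using tf.app.flags; the flag names are taken from the code above, while the defaults and help strings are only illustrative.

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_string('checkpoint', None, 'Path to the model checkpoint.')
flags.DEFINE_string('model_json', None, 'JSON description of the model layers.')
flags.DEFINE_string('init_dual_file', None, 'File to initialize the dual variables from.')
flags.DEFINE_float('init_nu', 200.0, 'Initial value of the nu dual variable.')
flags.DEFINE_string('test_input', None, 'Numpy file containing the test input to certify.')
flags.DEFINE_integer('true_class', 0, 'True class of the test input.')
flags.DEFINE_integer('adv_class', -1, 'Adversarial class to certify against (-1 for all classes).')
flags.DEFINE_integer('num_classes', 10, 'Number of output classes.')
flags.DEFINE_float('input_minval', 0.0, 'Smallest valid input value.')
flags.DEFINE_float('input_maxval', 1.0, 'Largest valid input value.')
flags.DEFINE_float('epsilon', 0.1, 'Radius of the perturbation ball to certify.')
# The remaining optimization flags (init_penalty, large_eig_num_steps,
# small_eig_num_steps, inner_num_steps, outer_num_steps, beta,
# smoothness_parameter, eig_learning_rate, optimizer, init_learning_rate,
# learning_rate_decay, momentum_parameter, print_stats_steps, stats_folder,
# projection_steps) follow the same DEFINE_* pattern.

if __name__ == '__main__':
    tf.app.run(main)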
Example #2
import sys
import time

import numpy as np
import tensorflow as tf

# The certification helpers below are assumed to live in the cleverhans
# experimental certification package; adjust the import paths to match your
# checkout.
from cleverhans.experimental.certification import dual_formulation
from cleverhans.experimental.certification import nn
from cleverhans.experimental.certification import optimization
from cleverhans.experimental.certification import utils

# FLAGS and MIN_LANCZOS_ITER are defined at module level in the original
# script; see the sketch after this example for the definitions this
# function relies on.


def main(_):
    """Certifies the test input against every candidate adversarial class."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    start_time = time.time()

    # Initialize neural network based on config files
    input_shape = [FLAGS.num_rows, FLAGS.num_columns, FLAGS.num_channels]
    nn_params = nn.load_network_from_checkpoint(FLAGS.checkpoint,
                                                FLAGS.model_json, input_shape)
    tf.logging.info('Loaded neural network with size of layers: %s',
                    nn_params.sizes)
    tf.logging.info('Loaded neural network with input shapes: %s',
                    nn_params.input_shapes)
    tf.logging.info('Loaded neural network with output shapes: %s',
                    nn_params.output_shapes)
    dual_var = utils.initialize_dual(nn_params,
                                     FLAGS.init_dual_file,
                                     init_nu=FLAGS.init_nu)

    # Reading test input and reshaping
    with tf.gfile.Open(FLAGS.test_input) as f:
        test_input = np.load(f)
    test_input = np.reshape(test_input, [np.size(test_input), 1])

    if FLAGS.adv_class == -1:
        start_class = 0
        end_class = FLAGS.num_classes
    else:
        start_class = FLAGS.adv_class
        end_class = FLAGS.adv_class + 1
    for adv_class in range(start_class, end_class):
        tf.logging.info('Running certification for adversarial class %d',
                        adv_class)
        if adv_class == FLAGS.true_class:
            continue

        # Same hyperparameters as in Example #1, plus the eigenvalue solver
        # type, a convolutional-layer flag, and the Lanczos iteration budget.
        optimization_params = {
            'init_penalty': FLAGS.init_penalty,
            'large_eig_num_steps': FLAGS.large_eig_num_steps,
            'small_eig_num_steps': FLAGS.small_eig_num_steps,
            'inner_num_steps': FLAGS.inner_num_steps,
            'outer_num_steps': FLAGS.outer_num_steps,
            'beta': FLAGS.beta,
            'smoothness_parameter': FLAGS.smoothness_parameter,
            'eig_learning_rate': FLAGS.eig_learning_rate,
            'optimizer': FLAGS.optimizer,
            'init_learning_rate': FLAGS.init_learning_rate,
            'learning_rate_decay': FLAGS.learning_rate_decay,
            'momentum_parameter': FLAGS.momentum_parameter,
            'print_stats_steps': FLAGS.print_stats_steps,
            'stats_folder': FLAGS.stats_folder,
            'projection_steps': FLAGS.projection_steps,
            'eig_type': FLAGS.eig_type,
            'has_conv': nn_params.has_conv,
            'lanczos_steps': FLAGS.lanczos_steps
        }
        # Iteration bounds for the Lanczos eigenvalue solver.
        lzs_params = {
            'min_iter': MIN_LANCZOS_ITER,
            'max_iter': FLAGS.lanczos_steps
        }
        with tf.Session() as sess:
            # Unlike Example #1, the session and Lanczos parameters are passed
            # straight to DualFormulation, and no separate objective or
            # PSD-matrix setup calls are made before optimizing.
            dual = dual_formulation.DualFormulation(
                sess, dual_var, nn_params, test_input, FLAGS.true_class,
                adv_class, FLAGS.input_minval, FLAGS.input_maxval,
                FLAGS.epsilon, lzs_params)
            optimization_object = optimization.Optimization(
                dual, sess, optimization_params)
            is_cert_found = optimization_object.run_optimization()
            if not is_cert_found:
                print('Example could not be verified')
                sys.exit()
    print('Example successfully verified')
    print('Elapsed time: ' + str(time.time() - start_time))
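
Example #2 is a newer variant of the same driver: the network is loaded with an explicit input shape, progress is reported through tf.logging, and the eigenvalue computation can be delegated to a Lanczos solver. Beyond the flags sketched after Example #1, it also assumes the following module-level definitions; the names come from the code above, while the defaults and the MIN_LANCZOS_ITER value are only illustrative.

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

# Input geometry used to build input_shape = [num_rows, num_columns, num_channels].
flags.DEFINE_integer('num_rows', 28, 'Number of rows in the input image.')
flags.DEFINE_integer('num_columns', 28, 'Number of columns in the input image.')
flags.DEFINE_integer('num_channels', 1, 'Number of channels in the input image.')
flags.DEFINE_string('verbosity', 'INFO', 'Verbosity level passed to tf.logging.set_verbosity.')
flags.DEFINE_string('eig_type', 'TF', 'Method used for the eigenvalue computations.')
flags.DEFINE_integer('lanczos_steps', 20, 'Maximum number of Lanczos iterations.')

# Lower bound on the number of Lanczos iterations; a module-level constant in
# the original script, shown here with an illustrative value.
MIN_LANCZOS_ITER = 5

if __name__ == '__main__':
    tf.app.run(main)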