Example #1
0
    def __init__(self, m, back='tf', sess=None):
        """Build the attack around model *m*, wrapping bare callables.

        Args:
            m: a cleverhans ``model.Model`` instance, or any callable that
               returns probabilities (it gets wrapped as a 'probs' model).
            back: backend identifier passed through to the base attack.
            sess: optional TF session passed through to the base attack.
        """
        # The rest of cleverhans expects a Model instance, so wrap plain
        # callables before delegating to the base-class constructor.
        if not isinstance(m, model.Model):
            m = model.CallableModelWrapper(m, 'probs')
        super(RandomAttack, self).__init__(m, back, sess)

        # Every feedable keyword is fed to the graph as a float32 value.
        feedable_names = ('eps', 'num_samples', 'num_batches', 'y',
                          'y_target', 'clip_min', 'clip_max')
        self.feedable_kwargs = {name: np.float32 for name in feedable_names}
        self.structural_kwargs = ['ord']
Example #2
0
File: run_eval.py  Project: qiushilin/fda
def run_eval(params):
    """Evaluate a cleverhans attack against a slim classification network.

    Builds the network named in ``params['graph_config']``, restores its
    checkpoint, constructs the attack described by
    ``params['attack_config']`` / ``params['attack_params']``, then walks the
    image/label lists from ``params['data_config']`` in batches, tracking
    clean accuracy, adversarial accuracy and fooling rate.  Final metrics
    are written to ``params['results_file']``.

    Args:
        params: dict with keys 'graph_config', 'data_config',
            'attack_config', 'attack_params' and 'results_file'.

    Raises:
        ValueError: if attack_config['attack_type'] is not 'GT' or 'self'.
    """
    # assign names to the different params:
    graph_config = params['graph_config']
    data_config = params['data_config']
    attack_config = params['attack_config']
    attack_params = params['attack_params']
    batch_size = data_config['batch_size']

    # Validate the attack type up front: the original only checked inside
    # the batch loop, after the whole graph had already been built, and an
    # unknown type would have left adversarial_x_tensor undefined.
    attack_type = attack_config['attack_type']
    if attack_type not in ("GT", "self"):
        raise ValueError("attack type has to be GT or self")

    gpu_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

    session = tf.Session(config=gpu_config)

    # fetch the logger
    logger = logging.getLogger('logger')

    # setup the session and the graphs:
    network_fn = nets_factory.get_network_fn(
        graph_config['model'],
        num_classes=(graph_config['num_classes']),
        is_training=False)

    # set up inputs
    eval_image_size = network_fn.default_image_size
    preprocessing_name = graph_config['model']
    image_preprocessing_fn = \
        preprocessing_factory.get_preprocessing(preprocessing_name,
                                                is_training=False)

    data_loader = DataLoader(data_config, image_preprocessing_fn,
                             eval_image_size)

    # setup model
    logits, normal_network = network_fn(data_loader.input_images)

    net_varlist = list(tf.get_collection(tf.GraphKeys.VARIABLES))
    saver = tf.train.Saver(var_list=net_varlist)
    saver.restore(session, graph_config['checkpoint_file'])

    # also setup evaluation network; AUTO_REUSE shares the restored weights.
    input_placeholder = \
        tf.placeholder(shape=[None, eval_image_size, eval_image_size, 3],
                       dtype=tf.float32, name='Input_placeholder')
    logits_frm_placeholder, network_frm_placeholder = \
        network_fn(input_placeholder, reuse=tf.AUTO_REUSE)

    logger.info('Loaded the networks')

    # setup the cleverhans attack:
    clever_model = chm.CallableModelWrapper(network_fn, 'logits')
    attacker_class = getattr(attacks, attack_config['attack_name'])
    attacker = attacker_class(clever_model, sess=session)

    # "GT" attacks are steered by the supplied ground-truth labels;
    # "self" attacks use the model's own predictions.
    outputs = None
    if attack_type == "GT":
        outputs = tf.placeholder(shape=[None, graph_config['num_classes']],
                                 dtype=tf.float32,
                                 name='GT_placeholder')
        adversarial_x_tensor = \
            attacker.generate(data_loader.input_images, y=outputs,
                              **attack_params)
    else:
        adversarial_x_tensor = \
            attacker.generate(data_loader.input_images,
                              **attack_params)

    logger.info('Loaded the attack')

    # Evaluation: load image paths and ground-truth labels, closing the
    # files deterministically (the original leaked both handles).
    with open(data_config['image_list']) as f:
        im_list = [x.strip() for x in f]
    with open(data_config['gt_list']) as f:
        gt_list = [x.strip() for x in f]

    # Integer division: any ragged final batch is dropped, as before.
    total_iter = len(im_list) // batch_size

    # metrics
    metrics = metric_functions.init_metrics()

    for i in range(total_iter):
        if i % 10 == 0:
            logger.info('iter' + str(i) + ' of ' + str(total_iter))

        im_batch = im_list[i * batch_size:(i + 1) * batch_size]
        gt_real = gt_list[i * batch_size:(i + 1) * batch_size]
        gt_real = [int(x) for x in gt_real]
        gt_batch = utils.get_gt_batch(gt_real, graph_config)

        feed_dict = {data_loader.image_path: im_batch}
        if attack_type == "GT":
            feed_dict[outputs] = gt_batch

        # find the adversarial sample
        adversarial_x = session.run(adversarial_x_tensor, feed_dict=feed_dict)

        # perform evaluation: clean logits and logits on the adversarial x
        feed_dict = {
            data_loader.image_path: im_batch,
            input_placeholder: adversarial_x
        }
        normal_out, adv_out = \
            session.run([logits, logits_frm_placeholder], feed_dict=feed_dict)

        metrics = metric_functions.update_metrics(metrics, normal_out, adv_out,
                                                  gt_real,
                                                  graph_config['offset'])

        # last-sample predictions, only used for the log line below
        normal_out = metrics['normal_prediction'][-1]
        adv_out = metrics['adv_prediction'][-1]

        log_txt = ' '.join([
            'Adversarial prediction',
            str(adv_out), 'true_predictions',
            str(normal_out), 'true_gt ',
            str(gt_real[-1])
        ])
        logger.info(log_txt)

        logger.info('======== ITER ' + str(i) + "========")
        logger.info('Real Top-1 Accuracy = {:.2f}'.format(
            metrics['real_acc'][-1]))
        logger.info('Top-1 Accuracy = {:.2f}'.format(metrics['adv_acc'][-1]))
        logger.info('Fooling Rate = {:.2f}'.format(metrics['fr'][-1]))
        logger.info('Old Label New Rank = {:.2f}'.format(
            np.mean(metrics['old_label_rank_new'])))
        logger.info('New Label Old Rank = {:.2f}'.format(
            np.mean(metrics['new_label_rank_old'])))

        # to be on the safe side, log the details of file
        if i % 10 == 0:
            normal_x = session.run(data_loader.input_images, feed_dict)
            pert = normal_x - adversarial_x
            log_txt = ' '.join([
                'Image min max',
                str(np.max(normal_x)),
                str(np.min(normal_x))
            ])
            logger.info(log_txt)
            log_txt = ' '.join(
                ['Perturbation min max',
                 str(np.max(pert)),
                 str(np.min(pert))])
            logger.info(log_txt)

    # now save all the important details; `with` closes the file even if a
    # write raises (the original relied on a manual close()).
    with open(params['results_file'], 'w') as result:
        result.write(
            'Real Top-1 Accuracy = {:.4f}'.format(metrics['real_acc'][-1]) +
            '\n')
        result.write('Top-1 Accuracy = {:.4f}'.format(metrics['adv_acc'][-1]) +
                     '\n')
        result.write('Fooling Rate = {:.4f}'.format(metrics['fr'][-1]) + '\n')
        result.write('Old Label New Rank = {:.4f}'.format(
            np.mean(metrics['old_label_rank_new'])) + '\n')
        result.write('New Label Old Rank = {:.4f}'.format(
            np.mean(metrics['new_label_rank_old'])) + '\n')