Code Example #1
def main():
    # forward_model = load_model()
    forward_model = create_fmodel()
    backward_model = create_fmodel()

    model = foolbox.models.CompositeModel(forward_model=forward_model,
                                          backward_model=backward_model)

    input_dir = '/home/hongyang/data/tiny-imagenet-200-aug/tiny-imagenet-200/train'
    Images, Labels = read_train_images(input_dir)
    print("Images.shape: ", Images.shape)
    for idx in range(100):
        # image is a numpy array with shape (64, 64, 3)
        # and dtype float32, with values between 0 and 255;
        # label is the original label (for untargeted
        # attacks) or the target label (for targeted attacks)
        # adversarial = run_attack(model, image, label)
        # store_adversarial(file_name, adversarial)
        adversarial = run_attack(model=model,
                                 image=Images[idx],
                                 label=Labels[idx].reshape([1]))
        # file_name is never defined in this snippet; the image index serves
        # as a stand-in name here.
        file_name = str(idx)
        store_adversarial(file_name, adversarial)

    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    attack_complete()
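The snippets on this page call several helpers (load_model, read_images / read_train_images, run_attack, store_adversarial, attack_complete, create_fmodel) without showing their imports. A minimal sketch of what the top of such a script would likely look like, assuming the challenge helpers come from the NIPS 2018 Adversarial Vision Challenge toolkit (adversarial_vision_challenge) and the remaining functions are submission-specific; the local module layout below is illustrative:

import numpy as np
import foolbox
# Helpers assumed to come from the challenge toolkit:
from adversarial_vision_challenge import (load_model, read_images,
                                          store_adversarial, attack_complete)
# Submission-specific helpers such as create_fmodel(), run_attack() and
# read_train_images() are assumed to be defined in local modules of each entry.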
Code Example #2
def main():
    # tf.logging.set_verbosity(tf.logging.INFO)
    # instantiate blackbox and substitute model
    forward_model = load_model()
    # backward_model1 = create_fmodel_18()
    backward_model2 = create_fmodel_ALP()
    backward_model3 = create_fmodel_ALP1000()
    # print(backward_model1[0])
    # instantiate differentiable composite model
    # (predictions from blackbox, gradients from substitute)
    model = CompositeModel(
        forward_model=forward_model,
        backward_models=[backward_model2, backward_model3],
        weights=[0.5, 0.5])
    predictor = tp.OfflinePredictor(tp.PredictConfig(
        model=SaliencyModel(),
        session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
        input_names=['image'],
        output_names=['saliency']))
    for (file_name, image, label) in read_images():
        pos_salience = find_salience(predictor, image)
        adversarial = run_attack(model, image, label, pos_salience)
        store_adversarial(file_name, adversarial)
    attack_complete()
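Several of the examples (#2, #4, #6, #7, #10) pass a list of backward_models (and optionally weights) to CompositeModel, whereas foolbox's own CompositeModel (used in examples #1, #8 and #9) takes a single backward_model. A minimal sketch, assuming the foolbox 1.x model interface (predictions() and gradient()), of how such a weighted composite could look; the class below is illustrative, not the submissions' actual implementation:

class WeightedCompositeModel:
    """Predictions from the black-box forward model, gradients as a
    weighted sum over several differentiable substitute models."""

    def __init__(self, forward_model, backward_models, weights=None):
        if weights is None:
            weights = [1.0 / len(backward_models)] * len(backward_models)
        assert len(weights) == len(backward_models)
        self.forward_model = forward_model
        self.backward_models = backward_models
        self.weights = weights

    def predictions(self, image):
        # Class scores always come from the (non-differentiable) forward model.
        return self.forward_model.predictions(image)

    def gradient(self, image, label):
        # Weighted average of the substitute models' input gradients.
        return sum(w * m.gradient(image, label)
                   for w, m in zip(self.weights, self.backward_models))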
Code Example #3
def main():
    model = load_model()
    for (file_name, image, label) in read_images():
        print('predicted model for: ', file_name,
              np.argmax(model.predictions(image)))
        adversarial = run_attack(model, image, label)
        store_adversarial(file_name, adversarial)

    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    attack_complete()
Code Example #4
def main():
    loader = TinyImageNetLoader()
    forward_model = load_model()
    backward_model1 = create_fmodel_ALP()
    backward_model2 = create_fmodel_ALP1000()
    model = CompositeModel(forward_model=forward_model,
                           backward_models=[backward_model1, backward_model2],
                           weights=[0.3, 0.7])
    i = 0
    total_sum = 0.0
    prev_avg = 0.0
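    # Strategy: always try the boundary-style attack (run_attack). For the first
    # 5 images, additionally run the Adam-based attack (run_attack2) and keep the
    # smaller perturbation while accumulating a running average distance. After
    # that, run_attack2 is only used as a fallback when the boundary result is
    # missing, worse than 25.0, or worse than the running average so far. The
    # values 100000.0 / 200000.0 act as sentinels for "no adversarial found".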
    for (file_name, image, label) in read_images():
        is_boundary = True
        is_adam = True
        adversarial = run_attack(loader, forward_model, image, label)
        # Calculate both Adam and Boundary for first 5 images.
        if i < 5:
            adversarial_adam = run_attack2(model, image, label, None)
            if adversarial is not None:
                error1 = distance(adversarial, image)
            else:
                error1 = 100000.0
                is_boundary = False
            if adversarial_adam is not None:
                error_adam = distance(adversarial_adam, image)
            else:
                error_adam = 200000.0
                is_adam = False
            if is_adam and error1 - error_adam > 0.0:
                adversarial = adversarial_adam
            if is_adam or is_boundary:
                i += 1
                total_sum += min(error1, error_adam)
        else:
            if adversarial is not None:
                error1 = distance(adversarial, image)
                prev_avg = total_sum / i
                i += 1
                total_sum += error1
            else:
                error1 = 100000.0
            if error1 > 25.0 or error1 > prev_avg or adversarial is None:
                adversarial_adam = run_attack2(model, image, label, None)
                if adversarial_adam is not None:
                    error_adam = distance(adversarial_adam, image)
                else:
                    error_adam = 200000.0
                if error1 - error_adam > 0.0:
                    adversarial = adversarial_adam
        store_adversarial(file_name, adversarial)

    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    attack_complete()
Code Example #5
def main():
    forward_model = load_model()
    for (file_name, image, label) in read_images():
        # tf.logging.info('Checking image is np array: %s' % str(type(image) is np.ndarray))
        adversarial = run_attack(forward_model, image, label, None)
        store_adversarial(file_name, adversarial)
    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    # print("Attack is complete")
    attack_complete()
Code Example #6
def main():
    # tf.logging.set_verbosity(tf.logging.INFO)
    # instantiate blackbox and substitute model
    forward_model = load_model()
    backward_model1 = create_fmodel_18()
    backward_model2 = create_fmodel_ALP()
    # print(backward_model1[0])
    # instantiate differentiable composite model
    # (predictions from blackbox, gradients from substitute)
    model = CompositeModel(forward_model=forward_model,
                           backward_models=[backward_model1, backward_model2])
    for (file_name, image, label) in read_images():
        adversarial = run_attack(model, image, label)
        store_adversarial(file_name, adversarial)
    attack_complete()
Code Example #7
def main():
    loader = TinyImageNetLoader()

    forward_model = load_model()
    backward_model1 = create_fmodel_ALP()
    backward_model2 = create_fmodel_ALP1000()
    model = CompositeModel(forward_model=forward_model,
                           backward_models=[backward_model1, backward_model2],
                           weights=[0.5, 0.5])
    for (file_name, image, label) in read_images():
        adversarial = run_attack(loader, forward_model, image, label)
        if adversarial is None:
            adversarial = run_attack2(model, image, label, None)
        store_adversarial(file_name, adversarial)

    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    attack_complete()
Code Example #8
def main():
    # tf.logging.set_verbosity(tf.logging.INFO)
    # instantiate blackbox and substitute model
    forward_model = load_model()
    backward_model = create_fmodel()

    # instantiate differentiable composite model
    # (predictions from blackbox, gradients from substitute)
    model = CompositeModel(forward_model=forward_model,
                           backward_model=backward_model)
    for (file_name, image, label) in read_images():
        # tf.logging.info('Checking image is np array: %s' % str(type(image) is np.ndarray))
        adversarial = run_attack(model, image, label)
        store_adversarial(file_name, adversarial)
    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    # print("Attack is complete")
    attack_complete()
Code Example #9
def main():
    # instantiate blackbox and substitute model
    forward_model = load_model()
    backward_model = create_fmodel()

    # instantiate differentiable composite model
    # (predictions from blackbox, gradients from substitute)
    model = CompositeModel(
        forward_model=forward_model,
        backward_model=backward_model)
    predictor = tp.OfflinePredictor(tp.PredictConfig(
        model=SaliencyModel(),
        session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
        input_names=['image'],
        output_names=['saliency']))
    for (file_name, image, label) in read_images():
        pos_salience = find_salience(predictor, image)
        adversarial = run_attack(model, image, label, pos_salience)
        store_adversarial(file_name, adversarial)
    attack_complete()
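find_salience() is not shown in these snippets. A hypothetical sketch of what it could do with the tensorpack predictor above, assuming the SaliencyModel's 'image' input takes a single HxWx3 float array and 'saliency' is a per-pixel (or per-pixel-per-channel) map; the helper actually used by the submissions may differ:

import numpy as np

def find_salience(predictor, image, top_k=1000):
    # OfflinePredictor returns one numpy array per requested output name.
    saliency, = predictor(image.astype(np.float32))
    saliency = np.abs(np.squeeze(saliency))
    if saliency.ndim == 3:
        # Reduce a per-channel map to one value per pixel.
        saliency = saliency.max(axis=-1)
    # (row, col) indices of the top_k most salient pixels.
    flat_idx = np.argsort(saliency, axis=None)[-top_k:]
    return np.unravel_index(flat_idx, saliency.shape)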
Code Example #10
def main():
    loader = TinyImageNetLoader()
    forward_model = load_model()
    backward_model1 = create_fmodel_ALP()
    backward_model2 = create_fmodel_ALP1000()
    model = CompositeModel(forward_model=forward_model,
                           backward_models=[backward_model1, backward_model2],
                           weights=[0.5, 0.5])
    predictor = tp.OfflinePredictor(
        tp.PredictConfig(model=SaliencyModel(),
                         session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
                         input_names=['image'],
                         output_names=['saliency']))
    for (file_name, image, label) in read_images():
        adversarial = run_attack(loader, forward_model, image, label)
        if adversarial is None:
            pos_salience = find_salience(predictor, image)
            adversarial = run_attack2(model, image, label, pos_salience)
        store_adversarial(file_name, adversarial)

    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    attack_complete()
Code Example #11
def main():

    n_classes = 200
    img_shape = (64, 64, 3)

    timed_wrapper = TimedWrapper(
        load_model())  # Measure model prediction runtime.
    remote_wrapper = RemoteModelWrapper(timed_wrapper,
                                        do_hash=True)  # Remember best adv. ex.

    with SampleGenerator(shape=img_shape, n_threads=1,
                         queue_lengths=100) as sample_gen:

        X_train, y_train, X_val, y_val = load_dataset(
            "/path/to/tiny/imagenet", ds_cache_path='tiny_imagenet_cached.npz')

        with MultiBoundaryAttack(model=remote_wrapper,
                                 X_known=np.vstack([X_train, X_val]),
                                 y_known=np.concatenate([y_train, y_val]),
                                 n_classes=n_classes,
                                 sample_gen=sample_gen,
                                 cached_conf=None) as attack:

            model_mean_query_time_history = []
            time_max = 89  # As allowed in the rules (batch of 10 in 900 seconds)
            time_bonus = 0  # Bonus to account for unfair models (see below)

            i = 0
            for (file_name, image, label) in read_images():

                time_start = default_timer()

                # Time calculation: 90 seconds per image are allowed. Models are allowed to use (40ms*1000calls) = 40s.
                # This leaves 50 seconds for the attacker.
                #
                # But if the model is MUCH slower than allowed, then the attacker has less time and can't finish.
                # To balance the scales, we detect this, and allow ourselves to use up some extra seconds.
                # If we don't do this (and hard-abort at 90 seconds), attacks that don't count time would have an advantage vs us.
                if i % 5 == 0 and len(model_mean_query_time_history) > 3:
                    avg_model_time = np.mean(model_mean_query_time_history)
                    if avg_model_time > 55e-3:
                        time_left_for_attacker = 89 - (1000 * avg_model_time)
                        time_bonus = min(55 - time_left_for_attacker, 50)
                        print(
                            "Model is slower than allowed (would leave only {:.1f} seconds for the attacker). "
                            "Will now use up to {:.1f} additional seconds per image."
                            .format(time_left_for_attacker, time_bonus))
                    elif time_bonus > 0:
                        time_bonus = 0
                        print(
                            "Model speed seems OK now. Reverting to the 90s time limit."
                        )

                print("Image {}:".format(i))
                image = np.float32(image)

                remote_wrapper.adv_set_target(orig_img=image,
                                              is_targeted=True,
                                              label=label)
                attack.run_attack(image=image,
                                  label=label,
                                  is_targeted=True,
                                  start_with_fgm=True,
                                  fgm_acceptable_dist=10,
                                  time_max=time_max + time_bonus)
                safe_adversarial = remote_wrapper.adv_get_best_img()

                if safe_adversarial is None:
                    safe_adversarial = np.uint8(image)
                    print("Couldn't find an adversarial! This sucks!")
                else:
                    dist = util.eval_distance(image, safe_adversarial)
                    print("Final distance: {}".format(dist))

                # Save model query time stats.
                rt_median, rt_mean, rt_std = timed_wrapper.get_runtime_stats()
                print(
                    "Response time of model: median={:.1f}ms, mean={:.1f}ms, std={:.1f}ms"
                    .format(rt_median * 1e3, rt_mean * 1e3, rt_std * 1e3))
                timed_wrapper.reset_runtime_stats()
                if remote_wrapper.adv_get_n_calls() > 100:
                    model_mean_query_time_history.append(rt_mean)

                time_elapsed_s = default_timer() - time_start
                print("Queried the model {} times.".format(
                    remote_wrapper.adv_get_n_calls()))
                print("Attack for this image took {} seconds.".format(
                    time_elapsed_s))
                print()

                store_adversarial(file_name, safe_adversarial)
                i += 1

            attack_complete()
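To make the time-budget arithmetic in the loop above concrete: the comments allot roughly 40 s of the 90 s per-image budget to the model, and the code grants itself extra time whenever the measured model latency exceeds 55 ms per call. With example numbers:

avg_model_time = 0.070  # suppose the remote model averages 70 ms per call
time_left_for_attacker = 89 - (1000 * avg_model_time)  # 89 - 70 = 19 s
time_bonus = min(55 - time_left_for_attacker, 50)      # min(36, 50) = 36 s
# The attack then runs with time_max + time_bonus = 89 + 36 = 125 s for this image.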