Example #1
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset_dir', type=str,
                        default=config.DEFAULT_DATASET_DIR)
    parser.add_argument('--dropout_rate', type=float, default=0.0)
    parser.add_argument('--optimizer', type=str, default='adam',
                        choices=['sgd', 'adam', 'rmsprop'])
    parser.add_argument('--use_lookahead', action='store_true')
    parser.add_argument('--batch_size', type=int, default=-1)
    parser.add_argument('--iter_size', type=int, default=-1)
    parser.add_argument('--lr_sched', type=str, default='linear',
                        choices=['linear', 'exp'])
    parser.add_argument('--initial_lr', type=float, default=-1.)
    parser.add_argument('--final_lr', type=float, default=-1.)
    parser.add_argument('--weight_decay', type=float, default=-1.)
    parser.add_argument('--epochs', type=int, default=1,
                        help='total number of epochs for training [1]')
    parser.add_argument('model', type=str,
                        help=SUPPORTED_MODELS)
    args = parser.parse_args()

    if args.use_lookahead and args.iter_size > 1:
        raise ValueError('cannot set both use_lookahead and iter_size')

    os.makedirs(config.SAVE_DIR, exist_ok=True)
    os.makedirs(config.LOG_DIR, exist_ok=True)
    config_keras_backend()
    train(args.model, args.dropout_rate, args.optimizer,
          args.use_lookahead, args.batch_size, args.iter_size,
          args.lr_sched, args.initial_lr, args.final_lr,
          args.weight_decay, args.epochs, args.dataset_dir)
    clear_keras_session()
Example #2
def runtime_eval(x):
    print(f"Trial config:{x}")

    config_keras_backend(x[6:15])
    acc, fit_time = train(
        model_name,
        0,       # dropout rate
        x[15],   # optimizer
        x[0],    # epsilon
        label_smoothing,
        use_lookahead,
        x[1],    # batch size
        iter_size,
        lr_sched,
        x[2],    # initial LR
        x[3],    # final LR
        x[4],    # weight decay
        x[5],    # number of epochs
        datadir,
        x[16],
        x[17],
        x[18])
    print(f"NUM_TRIAL in runtime2: {num_trial}, {acc} {fit_time}")
    clear_keras_session()
    global final_acc
    final_acc = acc
    return -float(fit_time)
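
The trial vector x above packs every tuned value positionally. A hypothetical vector laid out as the inline comments and slices suggest (all values below are placeholders, not taken from the source):

backend_cfg = [None] * 9                 # x[6:15]: nine settings forwarded to config_keras_backend()
x = ([1e-1, 16, 1e-2, 1e-5, 1e-4, 2]     # x[0:6]: epsilon, batch size, initial LR, final LR, weight decay, epochs
     + backend_cfg
     + ['adam', None, None, None])       # x[15]: optimizer; x[16:19] are unlabeled in the source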
Example #3
def main():
    args = parse_args()

    # load the cls_list (index to class name)
    with open('data/synset_words.txt') as f:
        cls_list = sorted(f.read().splitlines())

    config_keras_backend()

    # load the trained model
    net = tf.keras.models.load_model(args.model,
                                     compile=False,
                                     custom_objects={'AdamW': AdamW})

    # load and preprocess the test image
    img = cv2.imread(args.jpg)
    if img is None:
        raise SystemExit('cannot load the test image: %s' % args.jpg)
    img = preprocess(img)

    # predict and postprocess
    pred = net.predict(img)[0]
    top5_idx = pred.argsort()[::-1][:5]  # take the top 5 predictions
    for i in top5_idx:
        print('%5.2f   %s' % (pred[i], cls_list[i]))

    clear_keras_session()
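
The preprocess helper used above is imported from elsewhere and not shown. A minimal sketch of what it presumably does, assuming the same -1..1 scaling that appears in the other examples (the 224x224 input size is also an assumption):

import cv2
import numpy as np

def preprocess(img, size=(224, 224)):
    """Hypothetical preprocessing: resize, scale 0..255 pixels to -1..1, add a batch dimension."""
    img = cv2.resize(img, size).astype(np.float32)
    img = img * (2.0 / 255.0) - 1.0
    return np.expand_dims(img, axis=0)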
Example #4
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset_dir',
                        type=str,
                        default=config.DEFAULT_DATASET_DIR)
    parser.add_argument('--dropout_rate', type=float, default=0.0)
    parser.add_argument('--optimizer',
                        type=str,
                        default='adam',
                        choices=['sgd', 'adam', 'rmsprop'])
    parser.add_argument('--epsilon', type=float, default=1e-1)
    parser.add_argument('--label_smoothing', action='store_false')
    parser.add_argument('--use_lookahead', action='store_true')
    parser.add_argument('--batch_size', type=int, default=-1)
    parser.add_argument('--iter_size', type=int, default=-1)
    parser.add_argument('--lr_sched',
                        type=str,
                        default='linear',
                        choices=['linear', 'exp'])
    parser.add_argument('--initial_lr', type=float, default=-1.)
    parser.add_argument('--final_lr', type=float, default=-1.)
    parser.add_argument('--weight_decay', type=float, default=-1.)
    parser.add_argument('--epochs',
                        type=int,
                        default=1,
                        help='total number of epochs for training [1]')
    parser.add_argument('model', type=str, help=SUPPORTED_MODELS)
    args = parser.parse_args()

    if args.use_lookahead and args.iter_size > 1:
        raise ValueError('cannot set both use_lookahead and iter_size')

    # os.makedirs(config.SAVE_DIR, exist_ok=True)
    # os.makedirs(config.LOG_DIR, exist_ok=True)
    config_keras_backend()
    # check if running hyperband: a Hyperband TRIAL_BUDGET overrides NUM_EPOCH
    epochs_to_run = (params["TRIAL_BUDGET"] if "TRIAL_BUDGET" in params
                     else params["NUM_EPOCH"])  # valid NUM_EPOCH: {1, 2, 3}
    train(
        args.model,
        0,  # dropout rate
        params["OPTIMIZER"],
        params["EPSILON"],
        args.label_smoothing,
        args.use_lookahead,
        params["BATCH_SIZE"],
        args.iter_size,
        args.lr_sched,
        params["INIT_LR"],
        params["FINAL_LR"],
        params["WEIGHT_DECAY"],
        epochs_to_run,
        args.dataset_dir)
    clear_keras_session()
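
Unlike Example #1, this variant reads its hyperparameters from a global params mapping (e.g. supplied by a tuner) rather than from argparse. The keys it consumes are visible in the call above; a purely illustrative mapping (values are placeholders) might look like:

params = {
    "OPTIMIZER": "adam",
    "EPSILON": 1e-1,
    "BATCH_SIZE": 16,
    "INIT_LR": 1e-2,
    "FINAL_LR": 1e-5,
    "WEIGHT_DECAY": 1e-4,
    "NUM_EPOCH": 2,        # used when no trial budget is given
    # "TRIAL_BUDGET": 1,   # set by a Hyperband-style scheduler; overrides NUM_EPOCH
}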
Example #5
def infer_with_tf(img, model):
    """Inference the image with TensorFlow model."""
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    import tensorflow as tf
    from utils.utils import config_keras_backend, clear_keras_session
    from models.adamw import AdamW

    config_keras_backend()

    # load the trained model
    net = tf.keras.models.load_model(model,
                                     compile=False,
                                     custom_objects={'AdamW': AdamW})
    predictions = net.predict(img)[0]

    clear_keras_session()

    return predictions
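
A hedged usage sketch for infer_with_tf; the image path, checkpoint path, input size and scaling below are assumptions chosen to match the arithmetic used elsewhere in these examples:

import cv2
import numpy as np

img = cv2.imread('test.jpg')                        # hypothetical test image
img = cv2.resize(img, (224, 224)).astype(np.float32)
img = img * (2.0 / 255.0) - 1.0                     # scale 0..255 pixels to -1..1
img = np.expand_dims(img, axis=0)                   # add batch dimension
pred = infer_with_tf(img, 'saves/model-final.h5')   # hypothetical checkpoint path
print('top-1 class index:', int(np.argmax(pred)))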
Example #6
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset_dir', type=str,
                        default=config.DEFAULT_DATASET_DIR)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('model_file', type=str,
                        help='a saved model (.h5) file')
    args = parser.parse_args()
    config_keras_backend()
    if not args.model_file.endswith('.h5'):
        sys.exit('model_file is not a .h5')
    model = tf.keras.models.load_model(
        args.model_file,
        custom_objects={'AdamW': AdamW})
    ds_validation = get_dataset(
        args.dataset_dir, 'validation', args.batch_size)
    results = model.evaluate(
        x=ds_validation,
        steps=50000 // args.batch_size)
    print('test loss, test acc:', results)
    clear_keras_session()
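
Assuming the script above is saved as evaluate.py (the file name and paths here are assumptions), it would be invoked along the lines of: python3 evaluate.py --dataset_dir /data/imagenet --batch_size 64 saves/model-final.h5. The hard-coded steps=50000 // args.batch_size assumes a 50,000-image validation split, i.e. the size of the ImageNet validation set.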
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('h5', type=str)
    parser.add_argument('pb', type=str)
    args = parser.parse_args()

    if not args.h5.endswith('.h5'):
        raise SystemExit('bad keras model file name (not .h5)')

    config_keras_backend()
    tf.keras.backend.set_learning_phase(0)

    model = tf.keras.models.load_model(args.h5,
                                       compile=False,
                                       custom_objects={'AdamW': AdamW})

    in_tensor_name, out_tensor_names = keras_to_pb(model, args.pb, None)
    print('input tensor: ', in_tensor_name)
    print('output tensors: ', out_tensor_names)

    clear_keras_session()
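
A hypothetical invocation (the script and file names are assumptions): python3 h5_to_pb.py saves/model-final.h5 model.pb. The printed input and output tensor names are what a downstream consumer of the frozen graph needs to bind to.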
Example #8
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset_dir',
                        type=str,
                        default=config.DEFAULT_DATASET_DIR)
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--inv_model_file',
                        type=str,
                        help='a saved model (.h5) file')
    args = parser.parse_args()
    config_keras_backend()
    if not args.inv_model_file.endswith('.h5'):
        sys.exit('inv_model_file is not a .h5')
    inv_model = tf.keras.models.load_model(args.inv_model_file,
                                           compile=False,
                                           custom_objects={'AdamW': AdamW})
    inv_model.compile(optimizer='sgd',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

    ds_validation = get_dataset(args.dataset_dir, 'validation',
                                args.batch_size)

    ## VGG
    vgg_model = VGG19(include_top=True, weights='imagenet', classes=1000)
    vgg_model.compile(optimizer='sgd',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
    # InceptionV3
    inception_model = InceptionV3(include_top=True,
                                  weights='imagenet',
                                  classes=1000)
    inception_model.compile(optimizer='sgd',
                            loss='sparse_categorical_crossentropy',
                            metrics=['accuracy'])
    ## ResNet
    resnet_model = ResNet50(include_top=True, weights='imagenet', classes=1000)
    resnet_model.compile(optimizer='sgd',
                         loss='sparse_categorical_crossentropy',
                         metrics=['accuracy'])

    # Process batches
    iteration = 0
    sum1 = 0
    sum2 = 0
    for images, labels in tfds.as_numpy(ds_validation):

        if iteration < 532:  # skip already-processed batches (earlier resume point: 3822)
            print('continuing')
            iteration += 1
            continue
        if iteration == 50000:
            exit()

        labels = np.argmax(labels, axis=1)

        adv_imgs = run_attack(False,
                              'CarliniL2Method',
                              inception_model,
                              images,
                              labels,
                              batch_size=args.batch_size,
                              dataset='cifar',
                              fgsm_epsilon=0.3,
                              cwl2_confidence=0)
        #adv_imgs = run_attack(False, 'DeepFool', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
        #adv_imgs = run_attack(True, 'FastGradientMethod', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.1, cwl2_confidence=0)
        #adv_imgs = run_attack(False, 'ProjectedGradientDescent', inception_model, images, labels, batch_size=10, dataset='cifar', fgsm_epsilon=0.1, cwl2_confidence=0)
        ## VGG ################################################

        #img *= (2.0/255)  # normalize to: 0.0~2.0
        #img -= 1.0        # subtract mean to make it: -1.0~1.0
        #img = np.expand_dims(img, axis=0)

        vgg_imgs = []
        resnet_imgs = []
        inc_imgs = []
        flip_imgs = []
        inv_imgs = []
        adv_vgg_imgs = []
        adv_resnet_imgs = []
        adv_inc_imgs = []
        adv_flip_imgs = []
        adv_inv_imgs = []
        for ii in range(images.shape[0]):
            img = copy.deepcopy(images[ii, :, :, :])
            img += 1.0
            #img /= (2.0/255)
            img *= (255.0 / 2.0)

            ## VGG
            vgg_img = copy.deepcopy(img)
            vgg_img = cv2.resize(vgg_img, (224, 224))
            vgg_img = vgg_preprocess_input(vgg_img)
            vgg_imgs.append(vgg_img)

            ## Resnet
            resnet_img = copy.deepcopy(img)
            resnet_img = cv2.resize(resnet_img, (224, 224))
            resnet_img = resnet_preprocess_input(resnet_img)
            resnet_imgs.append(resnet_img)

            ## InceptionV3
            inc_img = copy.deepcopy(img)
            inc_img = cv2.resize(inc_img, (299, 299))
            inc_img = inception_preprocess_input(inc_img)
            inc_imgs.append(inc_img)

            ## Flipped
            #flip_img = copy.deepcopy(img)
            #flip_img = cv2.resize(flip_img, (299, 299))
            #flip_img = cv2.flip(flip_img, 1)
            #flip_img = inception_preprocess_input(flip_img)
            #flip_imgs.append(flip_img)
            flip_img = copy.deepcopy(images[ii, :, :, :])
            flip_img = cv2.flip(flip_img, 1)
            flip_imgs.append(flip_img)

            ## Inverse
            inv_img = copy.deepcopy(images[ii, :, :, :])  #########
            inv_img += 1.0
            inv_img /= 2.0
            inv_img = 1 - inv_img
            inv_img *= 255.0
            inv_img = cv2.resize(inv_img, (299, 299))
            inv_img = inception_preprocess_input(inv_img)
            inv_imgs.append(inv_img)

            #==========================================
            # ADVERSARIAL ---------------
            adv_img = copy.deepcopy(adv_imgs[ii, :, :, :])
            adv_img += 1.0
            #adv_img /= (2.0/255)
            adv_img *= (255.0 / 2.0)

            # VGG
            adv_vgg_img = copy.deepcopy(adv_img)
            adv_vgg_img = cv2.resize(adv_vgg_img, (224, 224))
            adv_vgg_img = vgg_preprocess_input(adv_vgg_img)
            adv_vgg_imgs.append(adv_vgg_img)

            # Resnet
            adv_resnet_img = copy.deepcopy(adv_img)
            adv_resnet_img = cv2.resize(adv_resnet_img, (224, 224))
            adv_resnet_img = resnet_preprocess_input(adv_resnet_img)
            adv_resnet_imgs.append(adv_resnet_img)

            # InceptionV3
            adv_inc_img = copy.deepcopy(adv_img)
            adv_inc_img = cv2.resize(adv_inc_img, (299, 299))
            adv_inc_img = inception_preprocess_input(adv_inc_img)
            adv_inc_imgs.append(adv_inc_img)

            ## Flipped
            #adv_flip_img = copy.deepcopy(img)
            #adv_flip_img = cv2.resize(adv_flip_img, (299, 299))
            #adv_flip_img = cv2.flip(adv_flip_img, 1)
            #adv_flip_img = inception_preprocess_input(adv_flip_img)
            #adv_flip_imgs.append(adv_flip_img)
            adv_flip_img = copy.deepcopy(adv_imgs[ii, :, :, :])
            adv_flip_img = cv2.flip(adv_flip_img, 1)
            adv_flip_imgs.append(adv_flip_img)

            ## Inverse
            ##test on inverse Inceptionv3
            adv_inv_img = copy.deepcopy(adv_imgs[ii, :, :, :])  #########
            adv_inv_img += 1.0
            adv_inv_img /= 2.0
            adv_inv_img = 1 - adv_inv_img
            adv_inv_img *= 255.0
            adv_inv_img = cv2.resize(adv_inv_img, (299, 299))
            adv_inv_img = inception_preprocess_input(adv_inv_img)
            adv_inv_imgs.append(adv_inv_img)

            # Horizontal Flipping
            # test on Resnet

        vgg_imgs = np.asarray(vgg_imgs)
        resnet_imgs = np.asarray(resnet_imgs)
        inc_imgs = np.asarray(inc_imgs)
        flip_imgs = np.asarray(flip_imgs)
        inv_imgs = np.asarray(inv_imgs)

        adv_vgg_imgs = np.asarray(adv_vgg_imgs)
        adv_resnet_imgs = np.asarray(adv_resnet_imgs)
        adv_inc_imgs = np.asarray(adv_inc_imgs)
        adv_flip_imgs = np.asarray(adv_flip_imgs)
        adv_inv_imgs = np.asarray(adv_inv_imgs)

        # Accuracies on the clean inputs (ResNet, VGG, Inception, flipped, inverse model)
        _, results1 = resnet_model.evaluate(x=resnet_imgs, y=labels, verbose=0)
        _, results2 = vgg_model.evaluate(x=vgg_imgs, y=labels, verbose=0)
        _, results3 = inception_model.evaluate(x=inc_imgs, y=labels, verbose=0)
        _, results4 = inception_model.evaluate(x=flip_imgs,
                                               y=labels,
                                               verbose=0)
        _, results5 = inv_model.evaluate(x=inv_imgs, y=labels, verbose=0)
        # Accuracies on the adversarial inputs
        _, results6 = resnet_model.evaluate(x=adv_resnet_imgs,
                                            y=labels,
                                            verbose=0)
        _, results7 = vgg_model.evaluate(x=adv_vgg_imgs, y=labels, verbose=0)
        _, results8 = inception_model.evaluate(x=adv_inc_imgs,
                                               y=labels,
                                               verbose=0)
        _, results9 = inception_model.evaluate(x=adv_flip_imgs,
                                               y=labels,
                                               verbose=0)
        _, results10 = inv_model.evaluate(x=adv_inv_imgs, y=labels, verbose=0)

        print(iteration)
        print(results1, results6)
        print(results2, results7)
        print(results3, results8)
        print(results4, results9)
        print(results5, results10)

        # Print the figure images
        INDEX = 1
        # Original image
        orig = copy.deepcopy(inc_imgs[INDEX])
        orig /= 2.0
        orig += 0.5
        orig *= 255.0
        # Adversarial image
        adv = copy.deepcopy(adv_inc_imgs[INDEX])
        adv /= 2.0
        adv += 0.5
        adv *= 255.0
        #Perturbation image
        diff = adv - orig

        print(np.amax(diff))
        #exit()

        # Flip image
        flip = copy.deepcopy(flip_imgs[INDEX])
        flip /= 2.0
        flip += 0.5
        flip *= 255.0
        # Save images
        imageio.imwrite('pandas/panda_orig.png',
                        np.reshape(orig, (299, 299, 3)))
        imageio.imwrite('pandas/panda_adv.png', np.reshape(adv, (299, 299, 3)))
        imageio.imwrite('pandas/panda_diff.png',
                        np.reshape(diff, (299, 299, 3)))
        imageio.imwrite('pandas/panda_flip.png',
                        np.reshape(flip, (299, 299, 3)))

        print(labels)

        print('Inception---original-------------------------')
        #preds = inception_model.predict(np.reshape(inc_imgs[INDEX],(1,299,299,3)))
        #print('confidence:', inc_decode_predictions(preds, top=1)[0])
        preds = inception_model.predict(inc_imgs)
        print('IncV3 Predicted:', np.argmax(preds, axis=1))
        print('IncV3 Predicted:', np.amax(preds, axis=1))
        print()

        print('VGG---original-------------------------')
        #preds = vgg_model.predict(np.reshape(vgg_imgs[INDEX],(1,224,224,3)))
        #print('confidence:', vgg_decode_predictions(preds, top=1)[0])
        preds = vgg_model.predict(vgg_imgs)
        print('VGG Predicted:', np.argmax(preds, axis=1))
        print('VGG Predicted:', np.amax(preds, axis=1))
        print()

        print('ResNet---original-------------------------')
        #preds = resnet_model.predict(np.reshape(resnet_imgs[INDEX],(1,224,224,3)))
        #print('confidence:', resnet_decode_predictions(preds, top=1)[0])
        preds = resnet_model.predict(resnet_imgs)
        print('ResNet Predicted:', np.argmax(preds, axis=1))
        print('ResNet Predicted:', np.amax(preds, axis=1))
        print()

        print('Inception---adv-------------------------')
        #preds = inception_model.predict(np.reshape(adv_inc_imgs[INDEX],(1,299,299,3)))
        #print('confidence:', inc_decode_predictions(preds, top=1)[0])
        preds = inception_model.predict(adv_inc_imgs)
        print('Adv IncV3 Predicted:', np.argmax(preds, axis=1))
        print('Adv IncV3 Predicted:', np.amax(preds, axis=1))
        print()

        print('VGG---adv-------------------------')
        #preds = vgg_model.predict(np.reshape(adv_vgg_imgs[INDEX],(1,224,224,3)))
        #print('confidence:', vgg_decode_predictions(preds, top=1)[0])
        preds = vgg_model.predict(adv_vgg_imgs)
        print('Adv VGG Predicted:', np.argmax(preds, axis=1))
        print('Adv VGG Predicted:', np.amax(preds, axis=1))
        print()

        print('ResNet---adv-------------------------')
        #preds = resnet_model.predict(np.reshape(adv_resnet_imgs[INDEX],(1,224,224,3)))
        #print('confidence:', resnet_decode_predictions(preds, top=1)[0])
        preds = resnet_model.predict(adv_resnet_imgs)
        print('Adv ResNet Predicted:', np.argmax(preds, axis=1))
        print('Adv ResNet Predicted:', np.amax(preds, axis=1))
        print()

        print('Inception---flip-------------------------')
        #preds = inception_model.predict(np.reshape(adv_flip_imgs[INDEX],(1,299,299,3)))
        #print('confidence:', inc_decode_predictions(preds, top=1)[0])
        preds = inception_model.predict(adv_flip_imgs)
        print('flip Predicted:', np.argmax(preds, axis=1))
        print('flip Predicted:', np.amax(preds, axis=1))
        print()

        #print('Accuracies--------------------------')
        #print('flip accuracy:', inc_decode_predictions(preds, top=3)[0])

        # Debug stop: inspect a single batch and exit; the logging and distance
        # code below in this loop is never reached.
        exit()

        with open("output_pgd_untarg_batch-20_norm-2.txt", "a") as myfile:
            myfile.write(
                str(results1) + ' ' + str(results2) + ' ' + str(results3) +
                ' ' + str(results4) + ' ' + str(results5) + ' ' +
                str(results6) + ' ' + str(results7) + ' ' + str(results8) +
                ' ' + str(results9) + ' ' + str(results10) + '\n')

        # Distances
        norm_diffs_1 = [
            np.linalg.norm(
                np.subtract(adv_inc_imgs[ii].flatten(),
                            inc_imgs[ii].flatten()), 1)
            for ii in range(inc_imgs.shape[0])
        ]
        norm_diffs_2 = [
            np.linalg.norm(
                np.subtract(adv_inc_imgs[ii].flatten(),
                            inc_imgs[ii].flatten()), 2)
            for ii in range(inc_imgs.shape[0])
        ]
        norm_diffs_inf = [
            np.linalg.norm(
                np.subtract(adv_inc_imgs[ii].flatten(),
                            inc_imgs[ii].flatten()), np.inf)
            for ii in range(inc_imgs.shape[0])
        ]

        print(np.mean(norm_diffs_1), np.mean(norm_diffs_2),
              np.mean(norm_diffs_inf))

        with open("distances_pgd_untarg_batch-20_norm-2.txt", "a") as myfile:
            myfile.write(
                str(np.mean(norm_diffs_1)) + ' ' + str(np.mean(norm_diffs_2)) +
                ' ' + str(np.mean(norm_diffs_inf)) + '\n')

        iteration += 1

        #exit()

        #results = resnet_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
        #print('RESNET test loss, test acc:', results)
        #results = vgg_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
        #print('VGG    test loss, test acc:', results)

#        labels = np.argmax(labels, axis=1)
#
#        #results = model.evaluate(
#        #               x=images, y=to_categorical(labels, 1000))
#        #print('test loss, test acc:', results)
#        total = total + images.shape[0]
#    print(total)
    exit()

    results = resnet_model.evaluate(x=ds_validation,
                                    steps=50000 // args.batch_size)
    print('test loss, test acc:', results)
    clear_keras_session()
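
The per-image arithmetic in this example repeatedly converts between the dataset's -1..1 range and 0..255 pixel values, and builds a colour-inverted copy. Two small helpers expressing those conversions (a sketch; the names are made up, the formulas are the ones used above):

import numpy as np

def to_pixels(x):
    """Map an image from the -1..1 range used by the dataset back to 0..255."""
    return (x + 1.0) * (255.0 / 2.0)

def invert_colors(x):
    """Colour-inverse of a -1..1 image, returned as 0..255 pixels (as built for inv_imgs)."""
    return (1.0 - (x + 1.0) / 2.0) * 255.0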
Example #9
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset_dir',
                        type=str,
                        default=config.DEFAULT_DATASET_DIR)
    parser.add_argument('--batch_size', type=int, default=20)
    parser.add_argument('--inv_model_file',
                        type=str,
                        help='a saved model (.h5) file')
    args = parser.parse_args()
    config_keras_backend()
    if not args.inv_model_file.endswith('.h5'):
        sys.exit('inv_model_file is not a .h5')
    inv_model = tf.keras.models.load_model(args.inv_model_file,
                                           compile=False,
                                           custom_objects={'AdamW': AdamW})
    inv_model.compile(optimizer='sgd',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

    ds_validation = get_dataset(args.dataset_dir, 'validation',
                                args.batch_size)

    ## VGG
    vgg_model = VGG19(include_top=True, weights='imagenet', classes=1000)
    vgg_model.compile(optimizer='sgd',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
    # InceptionV3
    inception_model = InceptionV3(include_top=True,
                                  weights='imagenet',
                                  classes=1000)
    inception_model.compile(optimizer='sgd',
                            loss='sparse_categorical_crossentropy',
                            metrics=['accuracy'])
    ## ResNet
    resnet_model = ResNet50(include_top=True, weights='imagenet', classes=1000)
    resnet_model.compile(optimizer='sgd',
                         loss='sparse_categorical_crossentropy',
                         metrics=['accuracy'])

    # Process batches
    iteration = 0
    sum1 = 0
    sum2 = 0
    for images, labels in tfds.as_numpy(ds_validation):

        if iteration < 199:
            print('continuing')
            iteration += 1
            continue
        if iteration == 500:
            exit()

        labels = np.argmax(labels, axis=1)

        #adv_imgs = run_attack(True, 'CarliniL2Method', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
        #adv_imgs = run_attack(False, 'DeepFool', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
        adv_imgs = run_attack(False,
                              'FastGradientMethod',
                              inception_model,
                              images,
                              labels,
                              batch_size=args.batch_size,
                              dataset='cifar',
                              fgsm_epsilon=0.3,
                              cwl2_confidence=0)
        #adv_imgs = run_attack(False, 'ProjectedGradientDescent', inception_model, images, labels, batch_size=10, dataset='cifar', fgsm_epsilon=0.1, cwl2_confidence=0)
        ## VGG ################################################

        #img *= (2.0/255)  # normalize to: 0.0~2.0
        #img -= 1.0        # subtract mean to make it: -1.0~1.0
        #img = np.expand_dims(img, axis=0)

        vgg_imgs = []
        resnet_imgs = []
        inc_imgs = []
        flip_imgs = []
        inv_imgs = []
        adv_vgg_imgs = []
        adv_resnet_imgs = []
        adv_inc_imgs = []
        adv_flip_imgs = []
        adv_inv_imgs = []
        for ii in range(images.shape[0]):
            img = copy.deepcopy(images[ii, :, :, :])
            img += 1.0
            #img /= (2.0/255)
            img *= (255.0 / 2.0)

            ## VGG
            vgg_img = copy.deepcopy(img)
            vgg_img = cv2.resize(vgg_img, (224, 224))
            vgg_img = vgg_preprocess_input(vgg_img)
            vgg_imgs.append(vgg_img)

            ## Resnet
            resnet_img = copy.deepcopy(img)
            resnet_img = cv2.resize(resnet_img, (224, 224))
            resnet_img = resnet_preprocess_input(resnet_img)
            resnet_imgs.append(resnet_img)

            ## InceptionV3
            inc_img = copy.deepcopy(img)
            inc_img = cv2.resize(inc_img, (299, 299))
            inc_img = inception_preprocess_input(inc_img)
            inc_imgs.append(inc_img)

            ## Flipped
            #flip_img = copy.deepcopy(img)
            #flip_img = cv2.resize(flip_img, (299, 299))
            #flip_img = cv2.flip(flip_img, 1)
            #flip_img = inception_preprocess_input(flip_img)
            #flip_imgs.append(flip_img)
            flip_img = copy.deepcopy(images[ii, :, :, :])
            flip_img = cv2.flip(flip_img, 1)
            flip_imgs.append(flip_img)

            ## Inverse
            inv_img = copy.deepcopy(images[ii, :, :, :])  #########
            inv_img += 1.0
            inv_img /= 2.0
            inv_img = 1 - inv_img
            inv_img *= 255.0
            inv_img = cv2.resize(inv_img, (299, 299))
            inv_img = inception_preprocess_input(inv_img)
            inv_imgs.append(inv_img)

            #==========================================
            # ADVERSARIAL ---------------
            adv_img = copy.deepcopy(adv_imgs[ii, :, :, :])
            adv_img += 1.0
            #adv_img /= (2.0/255)
            adv_img *= (255.0 / 2.0)

            # VGG
            adv_vgg_img = copy.deepcopy(adv_img)
            adv_vgg_img = cv2.resize(adv_vgg_img, (224, 224))
            adv_vgg_img = vgg_preprocess_input(adv_vgg_img)
            adv_vgg_imgs.append(adv_vgg_img)

            # Resnet
            adv_resnet_img = copy.deepcopy(adv_img)
            adv_resnet_img = cv2.resize(adv_resnet_img, (224, 224))
            adv_resnet_img = resnet_preprocess_input(adv_resnet_img)
            adv_resnet_imgs.append(adv_resnet_img)

            # InceptionV3
            adv_inc_img = copy.deepcopy(adv_img)
            adv_inc_img = cv2.resize(adv_inc_img, (299, 299))
            adv_inc_img = inception_preprocess_input(adv_inc_img)
            adv_inc_imgs.append(adv_inc_img)

            ## Flipped
            #adv_flip_img = copy.deepcopy(img)
            #adv_flip_img = cv2.resize(adv_flip_img, (299, 299))
            #adv_flip_img = cv2.flip(adv_flip_img, 1)
            #adv_flip_img = inception_preprocess_input(adv_flip_img)
            #adv_flip_imgs.append(adv_flip_img)
            adv_flip_img = copy.deepcopy(adv_imgs[ii, :, :, :])
            adv_flip_img = cv2.flip(adv_flip_img, 1)
            adv_flip_imgs.append(adv_flip_img)

            ## Inverse
            ##test on inverse Inceptionv3
            adv_inv_img = copy.deepcopy(adv_imgs[ii, :, :, :])  #########
            adv_inv_img += 1.0
            adv_inv_img /= 2.0
            adv_inv_img = 1 - adv_inv_img
            adv_inv_img *= 255.0
            adv_inv_img = cv2.resize(adv_inv_img, (299, 299))
            adv_inv_img = inception_preprocess_input(adv_inv_img)
            adv_inv_imgs.append(adv_inv_img)

            # Horizontal Flipping
            # test on Resnet

        vgg_imgs = np.asarray(vgg_imgs)
        resnet_imgs = np.asarray(resnet_imgs)
        inc_imgs = np.asarray(inc_imgs)
        flip_imgs = np.asarray(flip_imgs)
        inv_imgs = np.asarray(inv_imgs)

        adv_vgg_imgs = np.asarray(adv_vgg_imgs)
        adv_resnet_imgs = np.asarray(adv_resnet_imgs)
        adv_inc_imgs = np.asarray(adv_inc_imgs)
        adv_flip_imgs = np.asarray(adv_flip_imgs)
        adv_inv_imgs = np.asarray(adv_inv_imgs)

        # Accuracies on the clean inputs (ResNet, VGG, Inception, flipped, inverse model)
        _, results1 = resnet_model.evaluate(x=resnet_imgs, y=labels, verbose=0)
        _, results2 = vgg_model.evaluate(x=vgg_imgs, y=labels, verbose=0)
        _, results3 = inception_model.evaluate(x=inc_imgs, y=labels, verbose=0)
        _, results4 = inception_model.evaluate(x=flip_imgs,
                                               y=labels,
                                               verbose=0)
        _, results5 = inv_model.evaluate(x=inv_imgs, y=labels, verbose=0)
        # Accuracies on the adversarial inputs
        _, results6 = resnet_model.evaluate(x=adv_resnet_imgs,
                                            y=labels,
                                            verbose=0)
        _, results7 = vgg_model.evaluate(x=adv_vgg_imgs, y=labels, verbose=0)
        _, results8 = inception_model.evaluate(x=adv_inc_imgs,
                                               y=labels,
                                               verbose=0)
        _, results9 = inception_model.evaluate(x=adv_flip_imgs,
                                               y=labels,
                                               verbose=0)
        _, results10 = inv_model.evaluate(x=adv_inv_imgs, y=labels, verbose=0)

        print(iteration)
        print(results1, results6)
        print(results2, results7)
        print(results3, results8)
        print(results4, results9)
        print(results5, results10)

        with open("kot_fgsm_untarg.txt", "a") as myfile:
            myfile.write(
                str(results1) + ' ' + str(results2) + ' ' + str(results3) +
                ' ' + str(results4) + ' ' + str(results5) + ' ' +
                str(results6) + ' ' + str(results7) + ' ' + str(results8) +
                ' ' + str(results9) + ' ' + str(results10) + '\n')

        iteration += 1

        #exit()

        #results = resnet_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
        #print('RESNET test loss, test acc:', results)
        #results = vgg_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
        #print('VGG    test loss, test acc:', results)

#        labels = np.argmax(labels, axis=1)
#
#        #results = model.evaluate(
#        #               x=images, y=to_categorical(labels, 1000))
#        #print('test loss, test acc:', results)
#        total = total + images.shape[0]
#    print(total)
    exit()

    results = resnet_model.evaluate(x=ds_validation,
                                    steps=50000 // args.batch_size)
    print('test loss, test acc:', results)
    clear_keras_session()
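
Each loop iteration above appends the ten per-batch accuracies (results1..results10) as one space-separated line to kot_fgsm_untarg.txt. A small post-processing sketch that averages those columns (the column names reflect which model and input each result came from; this aggregation script itself is hypothetical):

import numpy as np

cols = ['resnet', 'vgg', 'inception', 'inception_flip', 'inverse',
        'adv_resnet', 'adv_vgg', 'adv_inception', 'adv_inception_flip', 'adv_inverse']
data = np.atleast_2d(np.loadtxt('kot_fgsm_untarg.txt'))
for name, mean in zip(cols, data.mean(axis=0)):
    print(f'{name:20s} {mean:.4f}')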
Example #10
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset_dir',
                        type=str,
                        default=config.DEFAULT_DATASET_DIR)
    parser.add_argument('--dropout_rate', type=float, default=0.0)
    parser.add_argument('--optimizer',
                        type=str,
                        default='sgd',
                        choices=['sgd', 'adam', 'rmsprop'])
    parser.add_argument('--epsilon', type=float, default=1e-1)
    parser.add_argument('--label_smoothing', action='store_true')
    parser.add_argument('--use_lookahead', action='store_true')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--iter_size', type=int, default=1)
    parser.add_argument('--lr_sched',
                        type=str,
                        default='steps',
                        choices=['linear', 'exp', 'steps'])
    parser.add_argument('--initial_lr', type=float, default=5e-2)
    parser.add_argument('--final_lr', type=float, default=1e-5)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--epochs',
                        type=int,
                        default=90,
                        help='total number of epochs for training [90]')
    parser.add_argument('--model', type=str, default='densenet121')
    parser.add_argument('--run_on_hpu', type=str, default='True')
    parser.add_argument('--bfloat16', type=str, default='True')
    parser.add_argument('--log_device_placement', action='store_true')
    parser.add_argument('--skip_eval', action='store_true')
    parser.add_argument('--measure_perf', action='store_true')
    parser.add_argument(
        '--extract_tensors',
        help="--extract_tensors <Path to dump extracted tensors>.",
        type=str)
    parser.add_argument(
        '--only_eval',
        help="--only_eval <Path to checkpoint>. Performs model evaluation only.",
        type=str)
    parser.add_argument('--iterations',
                        help="Sets number of iterations per epoch",
                        type=int)
    parser.add_argument('--train_subset', type=str, default='train')
    parser.add_argument('--val_subset', type=str, default='validation')
    args = parser.parse_args()

    # '--bfloat16' and '--run_on_hpu' arrive as the strings 'True'/'False';
    # eval() converts them to booleans.
    args.bfloat16 = eval(args.bfloat16)
    args.run_on_hpu = eval(args.run_on_hpu)

    if args.skip_eval or args.only_eval is None:
        tf.keras.backend.set_learning_phase(True)

    if args.run_on_hpu:
        log_info_devices = load_habana_module()
        print(f"Devices:\n {log_info_devices}")
    else:
        config_keras_backend_for_gpu()
    tf.debugging.set_log_device_placement(args.log_device_placement)

    if args.use_lookahead and args.iter_size > 1:
        raise ValueError('cannot set both use_lookahead and iter_size')

    os.makedirs(config.SAVE_DIR, exist_ok=True)
    os.makedirs(config.LOG_DIR, exist_ok=True)

    print("model:           " + str(args.model))
    print("dropout_rate:    " + str(args.dropout_rate))
    print("optimizer:       " + str(args.optimizer))
    print("epsilon:         " + str(args.epsilon))
    print("label_smoothing: " + str(args.label_smoothing))
    print("use_lookahead:   " + str(args.use_lookahead))
    print("batch_size:      " + str(args.batch_size))
    print("iter_size:       " + str(args.iter_size))
    print("lr_sched:        " + str(args.lr_sched))
    print("initial_lr:      " + str(args.initial_lr))
    print("final_lr:        " + str(args.final_lr))
    print("weight_decay:    " + str(args.weight_decay))
    print("epochs:          " + str(args.epochs))
    print("iterations:      " + str(args.iterations))
    print("dataset_dir:     " + str(args.dataset_dir))
    print("skip_eval:       " + str(args.skip_eval))
    print("only_eval:       " + str(args.only_eval))
    print("run_on_hpu:      " + str(args.run_on_hpu))
    print("bfloat16:        " + str(args.bfloat16))
    print("train subset:    " + str(args.train_subset))
    print("val subset:      " + str(args.val_subset))

    train(args.model, args.dropout_rate, args.optimizer, args.epsilon,
          args.label_smoothing, args.use_lookahead, args.batch_size,
          args.iter_size, args.lr_sched, args.initial_lr, args.final_lr,
          args.weight_decay, args.epochs, args.iterations, args.dataset_dir,
          args.skip_eval, args.only_eval, args.run_on_hpu, args.measure_perf,
          args.extract_tensors, args.bfloat16, args.train_subset,
          args.val_subset)
    clear_keras_session()
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset_dir',
                        type=str,
                        default=config.DEFAULT_DATASET_DIR)
    parser.add_argument('--batch_size', type=int, default=5)
    args = parser.parse_args()
    config_keras_backend()

    ds_validation = get_dataset(args.dataset_dir, 'validation',
                                args.batch_size)

    # InceptionV3
    inception_model = InceptionV3(include_top=True,
                                  weights='imagenet',
                                  classes=1000)
    inception_model.compile(optimizer='sgd',
                            loss='sparse_categorical_crossentropy',
                            metrics=['accuracy'])

    # Process batches
    iteration = 0
    sum1 = 0
    sum2 = 0
    for images, labels in tfds.as_numpy(ds_validation):

        if iteration < 31:
            print('continuing')
            iteration += 1
            continue
        if iteration == 1000:
            exit()

        labels = np.argmax(labels, axis=1)

        #adv_imgs = run_attack(False, 'CarliniL2Method', inception_model, images, labels, batch_size=5, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=40)
        adv_imgs = run_attack(False,
                              'CarliniLInfMethod',
                              inception_model,
                              images,
                              labels,
                              batch_size=5,
                              dataset='cifar',
                              fgsm_epsilon=0.3,
                              cwl2_confidence=0)
        #adv_imgs = run_attack(False, 'DeepFool', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.3, cwl2_confidence=0)
        #adv_imgs = run_attack(True, 'FastGradientMethod', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.1, cwl2_confidence=0)
        #adv_imgs = run_attack(True, 'ProjectedGradientDescent', inception_model, images, labels, batch_size=args.batch_size, dataset='cifar', fgsm_epsilon=0.1, cwl2_confidence=0)
        ## VGG ################################################

        inc_imgs = []
        adv_inc_imgs = []
        for ii in range(images.shape[0]):
            img = copy.deepcopy(images[ii, :, :, :])
            img += 1.0
            #img /= (2.0/255)
            img *= (255.0 / 2.0)

            ## InceptionV3
            inc_img = copy.deepcopy(img)
            inc_img = cv2.resize(inc_img, (299, 299))
            inc_img = inception_preprocess_input(inc_img)
            inc_imgs.append(inc_img)

            #==========================================
            # ADVERSARIAL ---------------
            adv_img = copy.deepcopy(adv_imgs[ii, :, :, :])
            adv_img += 1.0
            #adv_img /= (2.0/255)
            adv_img *= (255.0 / 2.0)

            # InceptionV3
            adv_inc_img = copy.deepcopy(adv_img)
            adv_inc_img = cv2.resize(adv_inc_img, (299, 299))
            adv_inc_img = inception_preprocess_input(adv_inc_img)
            adv_inc_imgs.append(adv_inc_img)

        inc_imgs = np.asarray(inc_imgs)

        adv_inc_imgs = np.asarray(adv_inc_imgs)

        # (disabled) accuracy checks on the clean and adversarial Inception batches

        #        _, results3 = inception_model.evaluate(x=inc_imgs, y=labels, verbose=0)
        #        _, results8 = inception_model.evaluate(x=adv_inc_imgs, y=labels, verbose=0)

        adv_inc_imgs = np.nan_to_num(adv_inc_imgs)
        inc_imgs = np.nan_to_num(inc_imgs)

        norm_diffs_1 = [
            np.linalg.norm(
                np.subtract(adv_inc_imgs[ii].flatten(),
                            inc_imgs[ii].flatten()), 1)
            for ii in range(inc_imgs.shape[0])
        ]
        norm_diffs_2 = [
            np.linalg.norm(
                np.subtract(adv_inc_imgs[ii].flatten(),
                            inc_imgs[ii].flatten()), 2)
            for ii in range(inc_imgs.shape[0])
        ]
        norm_diffs_inf = [
            np.linalg.norm(
                np.subtract(adv_inc_imgs[ii].flatten(),
                            inc_imgs[ii].flatten()), np.inf)
            for ii in range(inc_imgs.shape[0])
        ]

        print(iteration)
        print(np.mean(norm_diffs_1), np.mean(norm_diffs_2),
              np.mean(norm_diffs_inf))

        #with open("distances_cw0_untarg.txt", "a") as myfile:
        #    myfile.write(str(np.mean(norm_diffs_1)) + ' ' + str(np.mean(norm_diffs_2)) +  ' ' +  str(np.mean(norm_diffs_inf)) +  '\n'     )

        iteration += 1

        print(norm_diffs_1)
        #print(adv_inc_imgs[0])
        #print(inc_imgs[0])
        exit()

        #results = resnet_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
        #print('RESNET test loss, test acc:', results)
        #results = vgg_model.evaluate(x=adv_imgs, y=to_categorical(labels, 1000))
        #print('VGG    test loss, test acc:', results)

#        labels = np.argmax(labels, axis=1)
#
#        #results = model.evaluate(
#        #               x=images, y=to_categorical(labels, 1000))
#        #print('test loss, test acc:', results)
#        total = total + images.shape[0]
#    print(total)
    exit()

    results = resnet_model.evaluate(x=ds_validation,
                                    steps=50000 // args.batch_size)
    print('test loss, test acc:', results)
    clear_keras_session()
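
The list comprehensions above compute per-image L1, L2 and L-infinity distances between the clean and adversarial Inception inputs. An equivalent vectorised sketch (assuming both arrays have shape (N, 299, 299, 3)):

import numpy as np

def perturbation_norms(adv, clean):
    """Per-image L1, L2 and L-inf norms of (adv - clean), one value per sample."""
    diff = (adv - clean).reshape(adv.shape[0], -1)
    return (np.linalg.norm(diff, 1, axis=1),
            np.linalg.norm(diff, 2, axis=1),
            np.linalg.norm(diff, np.inf, axis=1))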