Example #1
        learning_rate=FLAGS.learning_rate,
        nb_epochs=FLAGS.nb_epochs,
        holdout=FLAGS.holdout,
        data_aug=FLAGS.data_aug,
        nb_epochs_s=FLAGS.nb_epochs_s,
        lmbda=FLAGS.lmbda,
        aug_batch_size=FLAGS.data_aug_batch_size,
    )


if __name__ == "__main__":

    # General flags
    flags.DEFINE_integer("nb_classes", NB_CLASSES, "Number of classes in problem")
    flags.DEFINE_integer("batch_size", BATCH_SIZE, "Size of training batches")
    flags.DEFINE_float("learning_rate", LEARNING_RATE, "Learning rate for training")

    # Flags related to oracle
    flags.DEFINE_integer("nb_epochs", NB_EPOCHS, "Number of epochs to train model")

    # Flags related to substitute
    flags.DEFINE_integer("holdout", HOLDOUT, "Test set holdout for adversary")
    flags.DEFINE_integer(
        "data_aug", DATA_AUG, "Number of substitute data augmentations"
    )
    flags.DEFINE_integer("nb_epochs_s", NB_EPOCHS_S, "Training epochs for substitute")
    flags.DEFINE_float("lmbda", LMBDA, "Lambda from arxiv.org/abs/1602.02697")
    flags.DEFINE_integer(
        "data_aug_batch_size", AUG_BATCH_SIZE, "Batch size for augmentation"
    )
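
The flag definitions above assume module-level defaults (NB_CLASSES, BATCH_SIZE, LEARNING_RATE, ...) and a FLAGS object declared elsewhere in the tutorial. Below is a minimal sketch, not taken from the tutorial itself, of that surrounding boilerplate for a TensorFlow 1.x-style script; the import path, default values, and the main() body are illustrative assumptions.

import tensorflow as tf
from absl import flags  # the tutorials import an equivalent flags module

FLAGS = flags.FLAGS

# Assumed defaults, for illustration only.
NB_CLASSES = 10
BATCH_SIZE = 128
LEARNING_RATE = 0.001


def main(argv=None):
    # tf.app.run() parses the command line into FLAGS and then calls main(),
    # so the parsed values are read back as FLAGS.<name> here.
    print(FLAGS.nb_classes, FLAGS.batch_size, FLAGS.learning_rate)


if __name__ == "__main__":
    flags.DEFINE_integer("nb_classes", NB_CLASSES, "Number of classes in problem")
    flags.DEFINE_integer("batch_size", BATCH_SIZE, "Size of training batches")
    flags.DEFINE_float("learning_rate", LEARNING_RATE, "Learning rate for training")
    tf.app.run()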
Example #2
File: test.py  Project: Leohoo27/learn_git
                   learning_rate=FLAGS.learning_rate,
                   nb_epochs=FLAGS.nb_epochs,
                   holdout=FLAGS.holdout,
                   data_aug=FLAGS.data_aug,
                   nb_epochs_s=FLAGS.nb_epochs_s,
                   lmbda=FLAGS.lmbda,
                   aug_batch_size=FLAGS.data_aug_batch_size)


if __name__ == '__main__':

    # General flags
    flags.DEFINE_integer('nb_classes', NB_CLASSES,
                         'Number of classes in problem')
    flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
    flags.DEFINE_float('learning_rate', LEARNING_RATE,
                       'Learning rate for training')

    # Flags related to oracle
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')

    # Flags related to substitute
    flags.DEFINE_integer('holdout', HOLDOUT, 'Test set holdout for adversary')
    flags.DEFINE_integer('data_aug', DATA_AUG,
                         'Number of substitute data augmentations')
    flags.DEFINE_integer('nb_epochs_s', NB_EPOCHS_S,
                         'Training epochs for substitute')
    flags.DEFINE_float('lmbda', LMBDA, 'Lambda from arxiv.org/abs/1602.02697')
    flags.DEFINE_integer('data_aug_batch_size', AUG_BATCH_SIZE,
                         'Batch size for augmentation')
Example #3
    check_installation(__file__)

    mnist_tutorial(
        nb_epochs=FLAGS.nb_epochs,
        batch_size=FLAGS.batch_size,
        learning_rate=FLAGS.learning_rate,
        clean_train=FLAGS.clean_train,
        backprop_through_attack=FLAGS.backprop_through_attack,
        nb_filters=FLAGS.nb_filters,
        attack_string=FLAGS.attack,
    )


if __name__ == "__main__":
    flags.DEFINE_integer("nb_filters", NB_FILTERS, "Model size multiplier")
    flags.DEFINE_integer("nb_epochs", NB_EPOCHS,
                         "Number of epochs to train model")
    flags.DEFINE_integer("batch_size", BATCH_SIZE, "Size of training batches")
    flags.DEFINE_float("learning_rate", LEARNING_RATE,
                       "Learning rate for training")
    flags.DEFINE_bool("clean_train", True, "Train on clean examples")
    flags.DEFINE_bool(
        "backprop_through_attack",
        False,
        ("If True, backprop through adversarial example "
         "construction process during adversarial training"),
    )
    flags.DEFINE_string("attack", "fgsm",
                        "Adversarial attack crafted and used for training")
    tf.app.run()
Example #4
    check_installation(__file__)

    cifar10_tutorial_bim(viz_enabled=FLAGS.viz_enabled,
                         nb_epochs=FLAGS.nb_epochs,
                         batch_size=FLAGS.batch_size,
                         source_samples=FLAGS.source_samples,
                         learning_rate=FLAGS.learning_rate,
                         attack_iterations=FLAGS.attack_iterations,
                         model_path=FLAGS.model_path,
                         targeted=FLAGS.targeted)


if __name__ == '__main__':
    flags.DEFINE_boolean('viz_enabled', VIZ_ENABLED,
                         'Visualize adversarial ex.')
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
    flags.DEFINE_integer('source_samples', SOURCE_SAMPLES,
                         'Number of test inputs to attack')
    flags.DEFINE_float('learning_rate', LEARNING_RATE,
                       'Learning rate for training')
    flags.DEFINE_string('model_path', MODEL_PATH,
                        'Path to save or load the model file')
    flags.DEFINE_integer('attack_iterations', ATTACK_ITERATIONS,
                         'Number of iterations to run attack; 1000 is good')
    flags.DEFINE_boolean('targeted', TARGETED,
                         'Run the tutorial in targeted mode?')

    tf.app.run()
Example #5
                           batch_size=FLAGS.batch_size,
                           save_advx=FLAGS.save_advx)


if __name__ == '__main__':
    flags.DEFINE_integer(
        'train_start', TRAIN_START, 'Starting point (inclusive) '
        'of range of train examples to use')
    flags.DEFINE_integer(
        'train_end', TRAIN_END, 'Ending point (non-inclusive) '
        'of range of train examples to use')
    flags.DEFINE_integer(
        'test_start', TEST_START, 'Starting point (inclusive) '
        'of range of test examples to use')
    flags.DEFINE_integer(
        'test_end', TEST_END, 'End point (non-inclusive) of '
        'range of test examples to use')
    flags.DEFINE_integer('nb_iter', NB_ITER, 'Number of iterations of PGD')
    flags.DEFINE_string('which_set', WHICH_SET, '"train" or "test"')
    flags.DEFINE_string('report_path', REPORT_PATH, 'Path to save to')
    flags.DEFINE_integer('mc_batch_size', MC_BATCH_SIZE,
                         'Batch size for MaxConfidence')
    flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Batch size for most jobs')
    flags.DEFINE_float('base_eps_iter', BASE_EPS_ITER,
                       'epsilon per iteration, if data were in [0, 1]')
    flags.DEFINE_integer(
        'save_advx', SAVE_ADVX,
        'If True, saves the adversarial examples to the '
        'filesystem.')
    tf.compat.v1.app.run()
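
Example #5 finishes with the TF2-compatible entry point tf.compat.v1.app.run(). For reference, here is a minimal sketch (not part of the example) of the same define-then-run pattern written directly against absl, which tf.compat.v1.app.run() is effectively a thin wrapper over; the flag names and defaults below are illustrative.

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_integer("nb_iter", 40, "Number of iterations of PGD")
flags.DEFINE_string("which_set", "test", '"train" or "test"')


def main(argv):
    # app.run() parses sys.argv into FLAGS before invoking main().
    print(FLAGS.nb_iter, FLAGS.which_set)


if __name__ == "__main__":
    app.run(main)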
Example #6
        fontsize=42,
    )
    imscatter(X_embedded, x_test[:batch_size], zoom=2, cmap="Purples")
    plt.savefig(
        output_dir + "adversarial_gradients_SNNL_factor_" + str(SNNL_factor) + ".png"
    )


def main(argv=None):
    SNNL_example(
        nb_epochs=FLAGS.nb_epochs,
        batch_size=FLAGS.batch_size,
        learning_rate=FLAGS.learning_rate,
        nb_filters=FLAGS.nb_filters,
        SNNL_factor=FLAGS.SNNL_factor,
        output_dir=FLAGS.output_dir,
    )


if __name__ == "__main__":
    flags.DEFINE_integer("nb_filters", NB_FILTERS, "Model size multiplier")
    flags.DEFINE_integer("nb_epochs", NB_EPOCHS, "Number of epochs to train model")
    flags.DEFINE_integer("batch_size", BATCH_SIZE, "Size of training batches")
    flags.DEFINE_float(
        "SNNL_factor", SNNL_FACTOR, "Multiplier for Soft Nearest Neighbor Loss"
    )
    flags.DEFINE_float("learning_rate", LEARNING_RATE, "Learning rate for training")
    flags.DEFINE_string("output_dir", OUTPUT_DIR, "output directory for saving figures")

    tf.app.run()
Example #7
    generate_adv_images(gpu=FLAGS.gpus,
                        attack_algo=FLAGS.attack,
                        dataset=FLAGS.dataset,
                        source_data_dir=DATASET_SOURCE_PATH[FLAGS.dataset],
                        nb_epochs=FLAGS.nb_epochs,
                        batch_size=FLAGS.batch_size,
                        learning_rate=FLAGS.learning_rate,
                        testing=True,
                        args=FLAGS)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_filters', NB_FILTERS, 'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', config.BATCH_SIZE,
                         'Size of training batches')
    flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate for training')
    flags.DEFINE_string('gpus', "0", 'GPU for training')
    flags.DEFINE_enum("attack", "FGSM", META_ATTACKER_INDEX,
                      "the attack method")
    flags.DEFINE_enum("dataset", "CIFAR10", [
        "CIFAR10", "CIFAR100", "TinyImageNet", "CIFAR100_coarse_label",
        "MNIST", "FashionMNIST", "ImageNet", "SVHN", "AWA2", "CUB"
    ], "the dataset we want to generate")
    flags.DEFINE_enum("arch", "conv4", [
        "conv10", "conv4", "vgg16", "vgg16small", "resnet10", "resnet18",
        "resnet50", "resnet101"
    ], "the network be used to generate adversarial examples")
    tf.app.run()
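
Example #7 constrains --attack, --dataset, and --arch with flags.DEFINE_enum. As a small aside (not part of the example), absl-style enum flags validate their value at parse time; the sketch below uses an illustrative flag name and value list.

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_enum("dataset", "CIFAR10", ["CIFAR10", "MNIST"], "Dataset to attack")

# A value from the allowed list is accepted; anything else
# (e.g. --dataset=IMAGENET) raises flags.IllegalFlagValueError.
FLAGS(["prog", "--dataset=MNIST"])
print(FLAGS.dataset)  # -> MNIST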
Example #8
  plt.title("TSNE of Sign of Adv Gradients, SNNLCrossEntropy Model, factor:" +
            str(FLAGS.SNNL_factor), fontsize=42)
  imscatter(X_embedded, x_test[:batch_size], zoom=2, cmap="Purples")
  plt.savefig(output_dir + 'adversarial_gradients_SNNL_factor_' +
              str(SNNL_factor) + '.png')


def main(argv=None):
  SNNL_example(nb_epochs=FLAGS.nb_epochs, batch_size=FLAGS.batch_size,
               learning_rate=FLAGS.learning_rate,
               nb_filters=FLAGS.nb_filters,
               SNNL_factor=FLAGS.SNNL_factor,
               output_dir=FLAGS.output_dir)


if __name__ == '__main__':
  flags.DEFINE_integer('nb_filters', NB_FILTERS,
                       'Model size multiplier')
  flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                       'Number of epochs to train model')
  flags.DEFINE_integer('batch_size', BATCH_SIZE,
                       'Size of training batches')
  flags.DEFINE_float('SNNL_factor', SNNL_FACTOR,
                     'Multiplier for Soft Nearest Neighbor Loss')
  flags.DEFINE_float('learning_rate', LEARNING_RATE,
                     'Learning rate for training')
  flags.DEFINE_string('output_dir', OUTPUT_DIR,
                      'output directory for saving figures')

  tf.app.run()
Example #9
          train_set.discard('white_box')
          train_set = train_set - child_set

    if not (adv_training or testing):
      continue
    for tm in train_set:
      print('running', tm)  # for each available threat model
      report = cifar10_train_on_untargeted(
          nb_epochs=FLAGS.nb_epochs, batch_size=FLAGS.batch_size,
          learning_rate=FLAGS.learning_rate, testing=testing,
          adv_training=adv_training, model_key=FLAGS.model_key,
          attacker_key=key, threat_model=tm,
          backprop_through_attack=FLAGS.backprop_through_attack)


if __name__ == '__main__':
  flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                       'Number of epochs to train model')
  flags.DEFINE_integer('batch_size', BATCH_SIZE,
                       'Size of training batches')
  flags.DEFINE_bool('backprop_through_attack', BACKPROP_THROUGH_ATTACK,
                    ('If True, backprop through adversarial example '
                     'construction process during adversarial training'))
  flags.DEFINE_bool('adv_training', ADV_TRAINING,
                    'If True, train the classifier on the adversarial examples.')
  flags.DEFINE_bool('testing', TESTING,
                    'If True, test the trained classifier on the adversarial training examples.')
  flags.DEFINE_string('model_key', MODEL_KEY,
                    'model key for the model to be adversarially trained. See meta.json')
  flags.DEFINE_float('learning_rate',LEARNING_RATE,
                      'The starting learning rate for adversarial training')
  flags.DEFINE_list('attacker_keys',ATTACKER_KEYS,'list of attacker keys to train as defined in meta file')
  tf.app.run()
Example #10
    flags.DEFINE_boolean('save', True, 'Whether to save from a checkpoint.')
    flags.DEFINE_string('save_dir', 'runs/X', 'Location to store logs/model.')
    flags.DEFINE_string('model_type', 'madry',
                        'Model type: basic|madry|resnet_tf.')
    flags.DEFINE_string(
        'attack_type_train', 'MadryEtAl_y_multigpu',
        'Attack type for adversarial training: '
        'FGSM|MadryEtAl{,_y}{,_multigpu}.')
    flags.DEFINE_string('attack_type_test', 'FGSM',
                        'Attack type for test: FGSM|MadryEtAl{,_y}.')
    flags.DEFINE_string('dataset', 'mnist', 'Dataset mnist|cifar10.')
    flags.DEFINE_boolean(
        'only_adv_train', False,
        'Do not train with clean examples when adv training.')
    flags.DEFINE_integer('save_steps', 50, 'Save model per X steps.')
    flags.DEFINE_integer('attack_nb_iter_train', None,
                         'Number of iterations of training attack.')
    flags.DEFINE_integer('eval_iters', 1, 'Evaluate every X steps.')
    flags.DEFINE_integer('lrn_step', 30000, 'Step to decrease learning rate '
                         'for ResNet.')
    flags.DEFINE_float('adam_lrn', 0.001, 'Learning rate for Adam Optimizer.')
    flags.DEFINE_float('mom_lrn', 0.1, 'Learning rate for Momentum Optimizer.')
    flags.DEFINE_integer('ngpu', 1, 'Number of gpus.')
    flags.DEFINE_integer('sync_step', 1, 'Sync params frequency.')
    flags.DEFINE_boolean('fast_tests', False, 'Fast tests against attacks.')
    flags.DEFINE_string(
        'data_path', './datasets/', 'Path to datasets. '
        'Each dataset should be in a subdirectory.')

    app.run()
Example #11
    flags.DEFINE_integer(
        "train_start",
        TRAIN_START,
        "Starting point (inclusive)"
        "of range of train examples to use",
    )
    flags.DEFINE_integer(
        "train_end",
        TRAIN_END,
        "Ending point (non-inclusive) "
        "of range of train examples to use",
    )
    flags.DEFINE_integer(
        "test_start",
        TEST_START,
        "Starting point (inclusive) "
        "of range of test examples to use",
    )
    flags.DEFINE_integer(
        "test_end",
        TEST_END,
        "End point (non-inclusive) of "
        "range of test examples to use",
    )
    flags.DEFINE_integer("nb_iter", NB_ITER, "Number of iterations of PGD")
    flags.DEFINE_string("which_set", WHICH_SET, '"train" or "test"')
    flags.DEFINE_integer("batch_size", BATCH_SIZE, "Batch size for most jobs")
    flags.DEFINE_float("base_eps_iter", BASE_EPS_ITER,
                       "epsilon per iteration, if data were in [0, 1]")
    tf.app.run()