def model_train(sess,
                x,
                y,
                predictions,
                X_train,
                Y_train,
                save=False,
                predictions_adv=None,
                init_all=True,
                evaluate=None,
                feed=None,
                args=None,
                rng=None,
                var_list=None):
    """
  Train a TF graph
  :param sess: TF session to use when training the graph
  :param x: input placeholder
  :param y: output placeholder (for labels)
  :param predictions: model output predictions
  :param X_train: numpy array with training inputs
  :param Y_train: numpy array with training outputs
  :param save: boolean controlling the save operation
  :param predictions_adv: if set with the adversarial example tensor,
                          will run adversarial training
  :param init_all: (boolean) If set to true, all TF variables in the session
                   are (re)initialized, otherwise only previously
                   uninitialized variables are initialized before training.
  :param evaluate: function that is run after each training epoch
                   (typically to display the test/validation accuracy).
  :param feed: An optional dictionary that is appended to the feeding
               dictionary before the session runs. Can be used to feed
               the learning phase of a Keras model for instance.
  :param args: dict or argparse `Namespace` object.
               Should contain `nb_epochs`, `learning_rate`,
               `batch_size`
               If save is True, should also contain 'train_dir'
               and 'filename'
  :param rng: Instance of numpy.random.RandomState
  :param var_list: Optional list of parameters to train.
  :return: True if model trained
  """
    warnings.warn("This function is deprecated and will be removed on or after"
                  " 2019-04-05. Switch to cleverhans.train.train.")
    args = _ArgsWrapper(args or {})

    # Check that necessary arguments were given (see doc above)
    assert args.nb_epochs, "Number of epochs was not given in args dict"
    assert args.learning_rate, "Learning rate was not given in args dict"
    assert args.batch_size, "Batch size was not given in args dict"

    if save:
        assert args.train_dir, "Directory for save was not given in args dict"
        assert args.filename, "Filename for save was not given in args dict"

    if rng is None:
        rng = np.random.RandomState()

    # Define loss
    loss = model_loss(y, predictions)
    if predictions_adv is not None:
        loss = (loss + model_loss(y, predictions_adv)) / 2

    train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
    train_step = train_step.minimize(loss, var_list=var_list)

    with sess.as_default():
        if hasattr(tf, "global_variables_initializer"):
            if init_all:
                tf.global_variables_initializer().run()
            else:
                initialize_uninitialized_global_variables(sess)
        else:
            warnings.warn("Update your copy of tensorflow; future versions of "
                          "CleverHans may drop support for this version.")
            sess.run(tf.initialize_all_variables())

        for epoch in xrange(args.nb_epochs):
            # Compute number of batches
            nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
            assert nb_batches * args.batch_size >= len(X_train)

            # Indices to shuffle training set
            index_shuf = list(range(len(X_train)))
            rng.shuffle(index_shuf)

            prev = time.time()
            for batch in range(nb_batches):

                # Compute batch start and end indices
                start, end = batch_indices(batch, len(X_train),
                                           args.batch_size)

                # Perform one training step
                feed_dict = {
                    x: X_train[index_shuf[start:end]],
                    y: Y_train[index_shuf[start:end]]
                }
                if feed is not None:
                    feed_dict.update(feed)
                train_step.run(feed_dict=feed_dict)
            assert end >= len(X_train)  # Check that all examples were used
            cur = time.time()
            _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) +
                         " seconds")
            if evaluate is not None:
                evaluate()

        if save:
            save_path = os.path.join(args.train_dir, args.filename)
            saver = tf.train.Saver()
            saver.save(sess, save_path)
            _logger.info("Completed model training and saved at: " +
                         str(save_path))
        else:
            _logger.info("Completed model training.")

    return True
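# --- Usage sketch (not part of the original source) ---
# A minimal, hedged example of how model_train above is typically invoked in
# the CleverHans tutorials. The import paths, data_mnist and make_basic_cnn
# are assumptions about the surrounding tutorial code, not facts from this
# snippet; the snippets in this file additionally assume numpy, tensorflow,
# math, time, os, warnings and six.moves.xrange are imported at module level.
import numpy as np
import tensorflow as tf
from cleverhans.utils_mnist import data_mnist                     # assumed helper
from cleverhans_tutorials.tutorial_models import make_basic_cnn   # assumed helper

X_train, Y_train, X_test, Y_test = data_mnist()
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
model = make_basic_cnn()
preds = model(x)

sess = tf.Session()
train_params = {'nb_epochs': 6, 'batch_size': 128, 'learning_rate': 0.001}
model_train(sess, x, y, preds, X_train, Y_train,
            args=train_params, rng=np.random.RandomState([2017, 8, 30]))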
def train(sess,
          loss,
          x,
          y,
          X_train,
          Y_train,
          save=False,
          init_all=False,
          evaluate=None,
          feed=None,
          args=None,
          rng=None,
          var_list=None,
          fprop_args=None,
          optimizer=None):
    """
  Train a TF graph.
  This function is deprecated. Prefer cleverhans.train.train when possible.
  cleverhans.train.train supports multiple GPUs but this function is still
  needed to support legacy models that do not support calling fprop more
  than once.

  :param sess: TF session to use when training the graph
  :param loss: CleverHans Loss object (e.g. CrossEntropy), not a plain
               tensor; its fprop(x, y) output is the loss that is minimized.
  :param x: input placeholder
  :param y: output placeholder (for labels)
  :param X_train: numpy array with training inputs
  :param Y_train: numpy array with training outputs
  :param save: boolean controlling the save operation
  :param init_all: (boolean) If set to true, all TF variables in the session
                   are (re)initialized, otherwise only previously
                   uninitialized variables are initialized before training.
  :param evaluate: function that is run after each training epoch
                   (typically to display the test/validation accuracy).
  :param feed: An optional dictionary that is appended to the feeding
               dictionary before the session runs. Can be used to feed
               the learning phase of a Keras model for instance.
  :param args: dict or argparse `Namespace` object.
               Should contain `nb_epochs`, `learning_rate`,
               `batch_size`
               If save is True, should also contain 'train_dir'
               and 'filename'
  :param rng: Instance of numpy.random.RandomState
  :param var_list: Optional list of parameters to train.
  :param fprop_args: dict, extra arguments to pass to fprop (loss and model).
  :param optimizer: Optimizer to be used for training
  :return: True if model trained
  """
    warnings.warn("This function is deprecated and will be removed on or after"
                  " 2019-04-05. Switch to cleverhans.train.train.")

    args = _ArgsWrapper(args or {})
    fprop_args = fprop_args or {}

    # Check that necessary arguments were given (see doc above)
    assert args.nb_epochs, "Number of epochs was not given in args dict"
    if optimizer is None:
        assert args.learning_rate is not None, ("Learning rate was not given "
                                                "in args dict")
    assert args.batch_size, "Batch size was not given in args dict"

    if save:
        assert args.train_dir, "Directory for save was not given in args dict"
        assert args.filename, "Filename for save was not given in args dict"

    if rng is None:
        rng = np.random.RandomState()

    # Define optimizer
    loss_value = loss.fprop(x, y, **fprop_args)
    if optimizer is None:
        optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
    else:
        if not isinstance(optimizer, tf.train.Optimizer):
            raise ValueError("optimizer object must be from a child class of "
                             "tf.train.Optimizer")
    # Trigger update operations within the default graph (such as batch_norm).
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_step = optimizer.minimize(loss_value, var_list=var_list)

    with sess.as_default():
        if hasattr(tf, "global_variables_initializer"):
            if init_all:
                tf.global_variables_initializer().run()
            else:
                initialize_uninitialized_global_variables(sess)
        else:
            warnings.warn("Update your copy of tensorflow; future versions of "
                          "CleverHans may drop support for this version.")
            sess.run(tf.initialize_all_variables())

        for epoch in xrange(args.nb_epochs):
            # Compute number of batches
            nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
            assert nb_batches * args.batch_size >= len(X_train)

            # Indices to shuffle training set
            index_shuf = list(range(len(X_train)))
            rng.shuffle(index_shuf)

            prev = time.time()
            for batch in range(nb_batches):

                # Compute batch start and end indices
                start, end = batch_indices(batch, len(X_train),
                                           args.batch_size)

                # Perform one training step
                feed_dict = {
                    x: X_train[index_shuf[start:end]],
                    y: Y_train[index_shuf[start:end]]
                }
                if feed is not None:
                    feed_dict.update(feed)
                train_step.run(feed_dict=feed_dict)
            assert end >= len(X_train)  # Check that all examples were used
            cur = time.time()
            _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) +
                         " seconds")
            if evaluate is not None:
                evaluate()

        if save:
            save_path = os.path.join(args.train_dir, args.filename)
            saver = tf.train.Saver()
            saver.save(sess, save_path)
            _logger.info("Completed model training and saved at: " +
                         str(save_path))
        else:
            _logger.info("Completed model training.")

    return True
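# --- Usage sketch (not part of the original source) ---
# Unlike model_train, train above takes a CleverHans Loss object and calls
# loss.fprop(x, y) internally. CrossEntropy and its import path are an
# assumption about the CleverHans loss module; the placeholders, model,
# session and data are reused from the sketch after model_train.
from cleverhans.loss import CrossEntropy   # assumed import path

ce_loss = CrossEntropy(model, smoothing=0.1)
train(sess, ce_loss, x, y, X_train, Y_train,
      args={'nb_epochs': 6, 'batch_size': 128, 'learning_rate': 0.001},
      rng=np.random.RandomState([2017, 8, 30]))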
Example 3
def effective_train_jsma(train_start=0,
                         train_end=20,
                         test_start=0,
                         test_end=10000,
                         viz_enabled=False,
                         nb_epochs=6,
                         batch_size=128,
                         nb_classes=10,
                         source_samples=10,
                         learning_rate=0.001):
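    """
    Adversarially train a basic CNN on MNIST with JSMA examples.

    For every training batch, a SaliencyMapMethod attack is re-instantiated
    against the current model and used to craft (nb_classes - 1) targeted
    adversarial examples per clean input; one optimization step is then taken
    on the weighted loss 0.4 * clean loss + 0.6 * adversarial loss. The
    trained model is saved under model_path/model_name.
    """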

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)
    # Create TF session and set as Keras backend session
    sess = tf.Session()
    print("Created TensorFlow session.")

    model_path = "./"
    model_name = "adv_trained_jsma_model_alpha0.4_fortest"

    # sess.run(tf.global_variables_initializer())
    rng = np.random.RandomState([2017, 8, 30])

    # Define input TF placeholder
    x1 = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))  # for clean data
    x2 = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))  # for adv data
    y = tf.placeholder(tf.float32, shape=(None, 10))  # clean labels, shared by both inputs

    # Initialize the model
    model = make_basic_cnn()
    preds = model(x1)
    preds_adv = model(x2)

    # Instantiate a SaliencyMapMethod attack object
    # jsma = SaliencyMapMethod(model, back='tf', sess=sess)
    jsma_params = {
        'theta': 1.,
        'gamma': 0.1,
        'clip_min': 0.,
        'clip_max': 1.,
        'y_target': None
    }
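    # jsma_params follows the SaliencyMapMethod interface: roughly, 'theta' is
    # the perturbation applied to each selected feature, 'gamma' the maximum
    # fraction of features to perturb, 'clip_min'/'clip_max' the valid pixel
    # range, and 'y_target' the one-hot target class (filled in per sample in
    # the loop below).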

    # Define loss
    loss = 0.4 * model_loss(y, preds) + 0.6 * model_loss(y, preds_adv)

    train_step = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_step = train_step.minimize(loss)

    def evaluate_2(adv_examples_last_batch, adv_clean_labels_last_batch):
        # Accuracy of adversarially trained model on legitimate test inputs
        eval_params = {'batch_size': batch_size}
        accuracy = model_eval(sess,
                              x1,
                              y,
                              preds,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate examples: %0.4f' % accuracy)
        report.adv_train_clean_eval = accuracy

        # Accuracy of the adversarially trained model on adversarial examples
        accuracy = model_eval(sess,
                              x2,
                              y,
                              preds_adv,
                              adv_examples_last_batch,
                              adv_clean_labels_last_batch,
                              args=eval_params)
        print('Test accuracy on last batch of adversarial examples: %0.4f' %
              accuracy)
        report.adv_train_adv_eval = accuracy

    with sess.as_default():
        tf.global_variables_initializer().run()

        for epoch in xrange(nb_epochs):
            print('Training for epoch %i/%i' % (epoch, nb_epochs - 1))

            # Compute number of batches
            nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
            assert nb_batches * batch_size >= len(X_train)

            # Indices to shuffle training set
            index_shuf = list(range(len(X_train)))
            rng.shuffle(index_shuf)

            prev = time.time()
            for batch in range(nb_batches):
                # re-instantiate Saliency object with new trained model
                jsma = SaliencyMapMethod(model, back='tf', sess=sess)
                print('--------------------------------------')
                # create an array for storing adv examples
                print('batch: %i/%i' % (batch + 1, nb_batches))
                # adv_examples = np.empty([1,28,28,1])
                adv_examples = []
                # for target labels
                #adv_targets = np.empty([1,10])
                # corresponding clean/correct label
                # adv_clean_labels = np.empty([1,10])
                adv_clean_labels = []
                # corresponding clean data
                # adv_clean_examples = np.empty([1,28,28,1])
                adv_clean_examples = []

                for sample_ind in xrange(0, batch_size):

                    print('Attacking input %i/%i' %
                          (sample_ind + 1, batch_size))
                    # Compute batch start and end indices
                    start, end = batch_indices(batch, len(X_train), batch_size)
                    X_this_batch = X_train[index_shuf[start:end]]
                    Y_this_batch = Y_train[index_shuf[start:end]]
                    # Perform one training step
                    # feed_dict = {x: X_train[index_shuf[start:end]],y: Y_train[index_shuf[start:end]]}

                    sample = X_this_batch[sample_ind:(
                        sample_ind + 1)]  # generate from training data

                    # We want to find an adversarial example for each possible target class
                    # (i.e. all classes that differ from the label given in the dataset)
                    current_class = int(np.argmax(Y_this_batch[sample_ind])
                                        )  # generate from training data
                    target_classes = other_classes(nb_classes, current_class)
                    print('Current class is ', current_class)

                    # For the grid visualization, keep original images along the diagonal
                    # grid_viz_data[current_class, current_class, :, :, :] = np.reshape(
                    #     sample, (img_rows, img_cols, channels))

                    # Loop over all target classes
                    for target in target_classes:
                        print('Generating adv. example for target class %i' %
                              target)

                        # This call runs the Jacobian-based saliency map approach
                        one_hot_target = np.zeros((1, nb_classes),
                                                  dtype=np.float32)
                        #create fake target
                        one_hot_target[0, target] = 1
                        jsma_params['y_target'] = one_hot_target
                        adv_x = jsma.generate_np(
                            sample, **jsma_params
                        )  # get numpy array (1, 28, 28, 1), not Tensor

                        # Check if success was achieved
                        # res = int(model_argmax(sess, x, preds, adv_x) == target)
                        # if succeeds
                        # if res == 1:
                        # append new adv_x to adv_examples array
                        # append sample here, so that the number of times sample is appended matches the number of adv_ex.
                        # adv_examples = np.append(adv_examples, adv_x, axis=0)
                        adv_examples.append(adv_x)
                        #adv_targets = np.append(adv_targets, one_hot_target, axis=0)
                        # adv_clean_labels = np.append(adv_clean_labels, np.expand_dims(Y_this_batch[sample_ind],axis=0), axis=0) # generate from training data
                        adv_clean_labels.append(Y_this_batch[sample_ind])
                        # adv_clean_examples = np.append(adv_clean_examples, sample, axis=0)
                        adv_clean_examples.append(sample)

                # what we have for this batch, batch_size * 9 data
                # adv_examples = adv_examples[1:,:,:,:]
                #adv_targets = adv_targets[1:,:]
                # adv_clean_labels = adv_clean_labels[1:,:]
                # adv_clean_examples = adv_clean_examples[1:,:,:,:]
                adv_examples = np.reshape(
                    adv_examples, (batch_size * (nb_classes - 1), 28, 28, 1))
                adv_clean_examples = np.reshape(adv_clean_examples,
                                                (batch_size *
                                                 (nb_classes - 1), 28, 28, 1))
                feed_dict = {
                    x1: adv_clean_examples,
                    x2: adv_examples,
                    y: adv_clean_labels
                }
                train_step.run(feed_dict=feed_dict)

            cur = time.time()
            _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) +
                         " seconds")

            evaluate_2(adv_examples, adv_clean_labels)
        print('Training finished.')

        # report on clean test data
        preds_test = model(x1)
        eval_par = {'batch_size': 10}
        acc_clean = model_eval(sess,
                               x1,
                               y,
                               preds_test,
                               X_test,
                               Y_test,
                               args=eval_par)
        print('Test accuracy on legitimate examples: %0.4f\n' % acc_clean)
        # reload fgsm successfully attacking adv test data
        # with np.load("adversarial_fgsm.npz") as data:
        #     adv_X_test, adv_clean_Y_test, adv_clean_X_test = data['adv_examples'], data['adv_clean_labels'], data['adv_clean_examples']
        # print('FGSM adversarial data are successfully reloaded.')
        # preds_adv_test = model(x1)
        # # Evaluate the accuracy of the MNIST model on adversarial examples
        # # eval_par = {'batch_size': 10}
        # acc = model_eval(sess, x1, y, preds_adv_test, adv_X_test, adv_clean_Y_test, args=eval_par)
        # print('Test accuracy on pre-generated adversarial examples of fgsm: %0.4f\n' % acc)
        # # reload fgsm successfully attacking adv test data
        # with np.load("adversarial_mnist_test_from_1500.npz") as data:
        #     adv_X_test, adv_clean_Y_test, adv_clean_X_test = data['adv_examples'], data['adv_clean_labels'], data['adv_clean_examples']
        # print('JSMA adversarial data are successfully reloaded.')
        # # Evaluate the accuracy of the MNIST model on adversarial examples
        # acc2 = model_eval(sess, x1, y, preds_adv_test, adv_X_test, adv_clean_Y_test, args=eval_par)
        # print('Test accuracy on pre-generated adversarial examples of jsma: %0.4f\n' % acc2)
        save_path = os.path.join(model_path, model_name)
        saver = tf.train.Saver()
        saver.save(sess, save_path)
        _logger.info("Completed model training and saved at: " +
                     str(save_path))
        # Close TF session
        sess.close()
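# --- Reference sketch (not part of the original source) ---
# Several snippets here assert nb_batches * batch_size >= len(X_train) and
# that the final `end` index reaches the whole set. That holds because
# cleverhans.utils.batch_indices shifts the last window back so it always
# spans a full batch (repeating a few examples). A re-implementation that
# matches the behaviour these asserts rely on:
def batch_indices_sketch(batch_nb, data_length, batch_size):
    """Return (start, end) for batch `batch_nb`, never exceeding data_length."""
    start = int(batch_nb * batch_size)
    end = int((batch_nb + 1) * batch_size)
    if end > data_length:
        # Shift the window back so the last batch is still full-sized.
        shift = end - data_length
        start -= shift
        end -= shift
    return start, end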
Example 4
def mnist_tutorial(train_start=0,
                   train_end=60000,
                   test_start=0,
                   test_end=10000,
                   nb_epochs=6,
                   batch_size=128,
                   learning_rate=0.001,
                   clean_train=True,
                   testing=False,
                   backprop_through_attack=False,
                   nb_filters=64):
    """
    MNIST cleverhans tutorial
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param clean_train: perform normal training on clean examples only
                        before performing adversarial training.
    :param testing: if true, complete an AccuracyReport for unit tests
                    to verify that performance is adequate
    :param backprop_through_attack: If True, backprop through adversarial
                                    example construction process during
                                    adversarial training.
    :return: an AccuracyReport object
    """

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Get MNIST test data
    # X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
    #                                               train_end=train_end,
    #                                               test_start=test_start,
    #                                               test_end=test_end)

    # Get notMNIST data
    with np.load("notmnist.npz") as data:
        X_train, Y_train, X_test, Y_test = data['examples_train'], data[
            'labels_train'], data['examples_test'], data['labels_test']

    # Use label smoothing
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    model_path = "./"
    model_name = "adv_trained_fgsm_model_mix_data_notmnist"

    fgsm_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.}
    rng = np.random.RandomState([1992, 8, 3])

    model = make_basic_cnn(nb_filters=nb_filters)
    preds = model(x)

    # Create TF session
    sess = tf.Session()

    fgsm = FastGradientMethod(model, sess=sess)
    adv_x = fgsm.generate(x, **fgsm_params)
    preds_adv = model(adv_x)
    mixed_x = tf.concat([x, adv_x], 0)
    mixed_y = tf.concat([y, y], 0)
    # length = tf.shape(mixed_x)[0]
    index_shuffle = list(range(batch_size * 2))
    rng.shuffle(index_shuffle)
    mixed_x = tf.gather(mixed_x, index_shuffle)
    mixed_y = tf.gather(mixed_y, index_shuffle)
    preds_mixed = model(mixed_x)
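    # Note: adv_x is defined symbolically from x, so feeding a clean batch at
    # x below drives both halves of mixed_x. The fixed index_shuffle of length
    # batch_size * 2 assumes every batch fed at x contains exactly batch_size
    # examples, which batch_indices guarantees by repeating a few examples in
    # the final batch.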

    loss = model_loss(mixed_y, preds_mixed)

    train_step = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_step = train_step.minimize(loss)

    tf.global_variables_initializer().run(session=sess)

    for epoch in xrange(nb_epochs):
        print('Training for epoch %i/%i' % (epoch, nb_epochs - 1))

        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
        assert nb_batches * batch_size >= len(X_train)

        # Indices to shuffle training set
        index_shuf = list(range(len(X_train)))
        rng.shuffle(index_shuf)

        prev = time.time()
        for batch in range(nb_batches):
            # re-instantiate FGSM object with new trained model
            # fgsm = FastGradientMethod(model, sess=sess)
            # adv_x = fgsm.generate(x, **fgsm_params)
            print('--------------------------------------')
            # create an array for storing adv examples
            print('batch: %i/%i' % (batch + 1, nb_batches))
            # adv_examples = np.empty([1,28,28,1])
            start, end = batch_indices(batch, len(X_train), batch_size)
            X_this_batch = X_train[index_shuf[start:end]]
            Y_this_batch = Y_train[index_shuf[start:end]]

            # adv_examples = sess.run(adv_x, feed_dict={x:X_this_batch})
            # for target labels
            #adv_targets = np.empty([1,10])
            # corresponding clean/correct label
            # adv_clean_labels = np.empty([1,10])
            # corresponding clean data
            # adv_clean_examples = np.empty([1,28,28,1])

            # adv_examples = np.reshape(adv_examples, (batch_size*(nb_classes-1),28,28,1))
            # adv_clean_examples = np.reshape(adv_clean_examples, (batch_size*(nb_classes-1),28,28,1))
            # mixed_X = np.concatenate((X_this_batch, adv_examples), axis=0)
            # mixed_Y = np.concatenate((Y_this_batch, Y_this_batch), axis=0)
            # print('mixed data have shape', np.shape(mixed_X))
            # print('mixed labels have shape', np.shape(mixed_Y))

            #shuffle the mixed data before training
            # index_of_batch = list(range(np.shape(mixed_Y)[0]))
            # rng.shuffle(index_of_batch)
            # mixed_X = mixed_X[index_of_batch]
            # mixed_Y = mixed_Y[index_of_batch]
            feed_dict = {x: X_this_batch, y: Y_this_batch}
            train_step.run(feed_dict=feed_dict, session=sess)

        cur = time.time()
        _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) +
                     " seconds")

        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)
        assert X_test.shape[0] == test_end - test_start, X_test.shape
        print('Test accuracy on legitimate examples: %0.4f' % acc)

        acc2 = model_eval(sess,
                          x,
                          y,
                          preds_adv,
                          X_test,
                          Y_test,
                          args=eval_params)
        assert X_test.shape[0] == test_end - test_start, X_test.shape
        print('Test accuracy on adversarial examples: %0.4f' % acc2)

    print('Training finished.')

    # reload fgsm successfully attacking adv test data
    # with np.load("adversarial_fgsm.npz") as data:
    #     adv_X_test, adv_clean_Y_test, adv_clean_X_test = data['adv_examples'], data['adv_clean_labels'], data['adv_clean_examples']
    # print('FGSM adversarial data are successfully reloaded.')
    # preds_adv_test = model(x1)
    # # Evaluate the accuracy of the MNIST model on adversarial examples
    # # eval_par = {'batch_size': 10}
    # acc = model_eval(sess, x1, y, preds_adv_test, adv_X_test, adv_clean_Y_test, args=eval_par)
    # print('Test accuracy on pre-generated adversarial examples of fgsm: %0.4f\n' % acc)
    # # reload fgsm successfully attacking adv test data
    # with np.load("adversarial_mnist_test_from_1500.npz") as data:
    #     adv_X_test, adv_clean_Y_test, adv_clean_X_test = data['adv_examples'], data['adv_clean_labels'], data['adv_clean_examples']
    # print('JSMA adversarial data are successfully reloaded.')
    # # Evaluate the accuracy of the MNIST model on adversarial examples
    # acc2 = model_eval(sess, x1, y, preds_adv_test, adv_X_test, adv_clean_Y_test, args=eval_par)
    # print('Test accuracy on pre-generated adversarial examples of jsma: %0.4f\n' % acc2)
    save_path = os.path.join(model_path, model_name)
    saver = tf.train.Saver()
    saver.save(sess, save_path)
    _logger.info("Completed model training and saved at: " + str(save_path))
    # Close TF session
    sess.close()

    return
Example 5
def model_eval_ae(sess,
                  x_orig,
                  x_target,
                  recon,
                  X_test=None,
                  X_test_target=None,
                  x_adv=None,
                  adv_recon=None,
                  lat_orig=None,
                  lat_recon=None,
                  feed=None,
                  args=None):
    global _model_eval_cache
    args = _ArgsWrapper(args or {})

    #print("shape of X_test: ", np.shape(X_test))
    #print("shape of X_test_target: ", np.shape(X_test_target))
    #print("shape of x_adv: ", np.shape(x_adv))
    assert args.batch_size, "Batch size was not given in args dict"
    if X_test is None or X_test_target is None:
        raise ValueError("X_test argument and X_test_target argument "
                         "must be supplied.")

    shape = np.shape(x_orig)
    w = shape[1]
    h = shape[2]
    c = shape[3]
    # Define the reconstruction-distance tensors symbolically (cached per
    # (recon, x_orig, x_target) triple so repeated calls reuse the same ops)
    key = (recon, x_orig, x_target)
    if x_adv is not None and lat_orig is not None and key in _model_eval_cache:
        d1, d2, dist_diff, noise, dist_lat = _model_eval_cache[key]
    elif x_adv is not None and lat_orig is None and key in _model_eval_cache:
        d1, d2, dist_diff, noise = _model_eval_cache[key]
    elif x_adv is None and lat_orig is not None and key in _model_eval_cache:
        d1, d2, dist_diff, dist_lat = _model_eval_cache[key]
    elif key in _model_eval_cache:
        d1, d2, dist_diff = _model_eval_cache[key]
    else:
        # Squared L2 distance between the reconstruction and the original input
        d1 = tf.reduce_sum(
            tf.squared_difference(
                tf.reshape(recon, (tf.shape(recon)[0], w * h * c)),
                tf.reshape(x_orig, (tf.shape(x_orig)[0], w * h * c))), 1)
        # Squared L2 distance between the reconstruction and the target input
        d2 = tf.reduce_sum(
            tf.squared_difference(
                tf.reshape(recon, (tf.shape(recon)[0], w * h * c)),
                tf.reshape(x_target, (tf.shape(x_target)[0], w * h * c))), 1)
        dist_diff = d1 - d2

        if x_adv is not None and lat_orig is not None:
            # L2 norm of the adversarial perturbation
            noise = reduce_sum(tf.square(x_orig - x_adv),
                               list(range(1, len(shape))))
            noise = pow(noise, 0.5)
            # Squared L2 distance between clean and reconstructed latent codes
            dist_lat = tf.reduce_sum(
                tf.squared_difference(lat_orig, lat_recon), 1)
            _model_eval_cache[key] = d1, d2, dist_diff, noise, dist_lat
        elif x_adv is not None and lat_orig is None:
            noise = reduce_sum(tf.square(x_orig - x_adv),
                               list(range(1, len(shape))))
            noise = pow(noise, 0.5)
            _model_eval_cache[key] = d1, d2, dist_diff, noise
        elif x_adv is None and lat_orig is not None:
            dist_lat = tf.reduce_sum(
                tf.squared_difference(lat_orig, lat_recon), 1)
            _model_eval_cache[key] = d1, d2, dist_diff, dist_lat
        else:
            _model_eval_cache[key] = d1, d2, dist_diff

    # Initialize accumulators for the averaged distance metrics

    avg_dist_diff = 0
    avg_dist_orig = 0
    avg_dist_targ = 0
    avg_noise = 0
    avg_dist_lat = 0
    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
        assert nb_batches * args.batch_size >= len(X_test)

        l1 = np.shape(X_test)
        l2 = np.shape(X_test_target)
        X_cur = np.zeros((args.batch_size, l1[1], l1[2], l1[3]),
                         dtype='float64')
        X_targ_cur = np.zeros((args.batch_size, l2[1], l2[2], l2[3]),
                              dtype='float64')
        #X_cur = np.zeros((args.batch_size, X_test.shape[1:]),
        #                  dtype=X_test.dtype)
        #X_targ_cur = np.zeros((args.batch_size,X_test_target.shape[1:]),
        #                dtype=X_test_target.dtype)
        start, end = batch_indices(0, len(X_test), args.batch_size)

        #feed_dict_1 = {x_orig: X_test[index_shuf[start:end]],
        #                x_target: X_test_target[index_shuf[start:end]]}
        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                _logger.debug("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * args.batch_size
            end = min(len(X_test), start + args.batch_size)

            # The last batch may be smaller than the others. This should not
            # bias the averaged metrics disproportionately.
            cur_batch_size = end - start
            #print()
            #print("np.shape(X_test_target[start:end]: ", np.shape(X_test_target[start:end]))
            #print("np.shape(X_targ_cur[:cur_batch_size]: ",np.shape(X_targ_cur[:cur_batch_size]))
            X_cur[:cur_batch_size] = X_test[start:end]
            X_targ_cur[:cur_batch_size] = X_test_target[start:end]
            feed_dict_1 = {x_orig: X_cur, x_target: X_targ_cur}
            if feed is not None:
                feed_dict_1.update(feed)
            cur_avg_dist_diff = dist_diff.eval(feed_dict=feed_dict_1)
            cur_avg_dist_orig = d1.eval(feed_dict=feed_dict_1)
            cur_avg_dist_targ = d2.eval(feed_dict=feed_dict_1)
            if (lat_orig is not None):
                cur_avg_dist_lat = dist_lat.eval(feed_dict=feed_dict_1)
            if x_adv is not None:
                cur_avg_noise = noise.eval(feed_dict=feed_dict_1)

            avg_dist_diff += cur_avg_dist_diff[:cur_batch_size].sum()
            avg_dist_orig += cur_avg_dist_orig[:cur_batch_size].sum()
            avg_dist_targ += cur_avg_dist_targ[:cur_batch_size].sum()
            if lat_orig is not None:
                avg_dist_lat += cur_avg_dist_lat[:cur_batch_size].sum()
            if x_adv is not None:
                avg_noise += cur_avg_noise[:cur_batch_size].sum()
        assert end >= len(X_test)

        # Divide by number of examples to get final value
        avg_dist_diff /= len(X_test)
        avg_dist_orig /= len(X_test)
        avg_dist_targ /= len(X_test)
        avg_noise /= len(X_test)
        avg_dist_lat /= len(X_test)
    return avg_noise, avg_dist_orig, avg_dist_targ, avg_dist_diff, avg_dist_lat
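# --- Usage sketch (not part of the original source) ---
# model_eval_ae returns five averaged metrics in a fixed order; when x_adv
# and lat_orig are left as None, avg_noise and avg_dist_lat stay at 0. The
# placeholders, recon_op and test arrays below are assumptions about the
# surrounding autoencoder code.
eval_params = {'batch_size': 128}
avg_noise, avg_dist_orig, avg_dist_targ, avg_dist_diff, avg_dist_lat = \
    model_eval_ae(sess, x_orig, x_target, recon_op,
                  X_test=X_test, X_test_target=X_test_target,
                  args=eval_params)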
Example 6
    logging.info(f"Noise ratio: {normalized_current_weights}")

    ### Train model with weighted loss for each noise ###
    loss_total_list = np.zeros((args.num_noises, ))
    for epoch in range(args.epochs):
        logging.info(f'epoch: {epoch}')
        # Compute number of batches
        num_batches = int(math.ceil(num_train / args.batch_size))
        assert num_batches * args.batch_size >= num_train

        # Indices to shuffle training set
        index_shuf = list(range(num_train))
        rng.shuffle(index_shuf)
        for batch in range(num_batches):
            # Compute batch start and end indices
            start, end = batch_indices(batch, num_train, args.batch_size)
            feed_dict = {
                x: X_train_adv[:, index_shuf[start:end]],
                y: Y_train[index_shuf[start:end]],
                w: weights_distribution[oracle_iter]
            }
            loss_vals, _ = sess.run([losses, train_step], feed_dict=feed_dict)
            # Normalize and log loss
            loss_total_list += loss_vals / (args.epochs * num_batches)
    # Save average loss for the next iteration
    losses_np[oracle_iter, :] = loss_total_list
    # Save the latest trained model
    os.makedirs("../model/robust_optimization/", exist_ok=True)
    saver.save(sess, "../model/robust_optimization/model.ckpt")

# See https://stackoverflow.com/questions/46079644/tensorflow-attempting-to-use-uninitialized-value-error-when-restoring
Example 7
def model_train(sess, x, y, predictions, X_train, Y_train, save=False,
                predictions_adv=None, init_all=True, evaluate=None,
                verbose=True, feed=None, args=None, rng=None, aux_loss=None,
                opt_type=None, summary=None):
    """
    Train a TF graph
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param predictions: model output predictions
    :param X_train: numpy array with training inputs
    :param Y_train: numpy array with training outputs
    :param save: boolean controlling the save operation
    :param predictions_adv: if set with the adversarial example tensor,
                            will run adversarial training
    :param init_all: (boolean) If set to true, all TF variables in the session
                     are (re)initialized, otherwise only previously
                     uninitialized variables are initialized before training.
    :param evaluate: function that is run after each training epoch
                     (typically to display the test/validation accuracy).
    :param verbose: (boolean) all print statements disabled when set to False.
    :param feed: An optional dictionary that is appended to the feeding
                 dictionary before the session runs. Can be used to feed
                 the learning phase of a Keras model for instance.
    :param args: dict or argparse `Namespace` object.
                 Should contain `nb_epochs`, `learning_rate`,
                 `batch_size`
                 If save is True, should also contain 'train_dir'
                 and 'filename'
    :param rng: Instance of numpy.random.RandomState
    :return: True if model trained
    """
    args = _ArgsWrapper(args or {})

    train_writer = tf.summary.FileWriter('./logs/train', sess.graph)

    # Check that necessary arguments were given (see doc above)
    assert args.nb_epochs, "Number of epochs was not given in args dict"
    assert args.learning_rate, "Learning rate was not given in args dict"
    assert args.batch_size, "Batch size was not given in args dict"

    if save:
        assert args.train_dir, "Directory for save was not given in args dict"
        assert args.filename, "Filename for save was not given in args dict"

    if not verbose:
        old_log_level = get_log_level(name=_logger.name)
        set_log_level(logging.WARNING, name=_logger.name)
        warnings.warn("verbose argument is deprecated and will be removed"
                      " on 2018-02-11. Instead, use utils.set_log_level()."
                      " For backward compatibility, log_level was set to"
                      " logging.WARNING (30).")

    if rng is None:
        rng = np.random.RandomState()

    # Define loss
    loss = model_loss(y, predictions, aux_loss=aux_loss)
    if predictions_adv is not None:
        loss = (loss + model_loss(y, predictions_adv, aux_loss=aux_loss)) / 2

    #XXX this is new
    if opt_type == "momentum":
        initial_learning_rate = 0.1 * args.batch_size / 128 * 0.01
        batches_per_epoch = X_train.shape[0] / args.batch_size
        global_step = tf.train.get_or_create_global_step()
        _MOMENTUM = 0.9

        # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs.
        boundaries = [int(batches_per_epoch * epoch) for epoch in [100, 150, 200]]
        values = [initial_learning_rate * decay for decay in [1, 0.1, 0.01, 0.001]]
        learning_rate = tf.train.piecewise_constant(
            tf.cast(global_step, tf.int32), boundaries, values)

        # Create a tensor named learning_rate for logging purposes
        tf.identity(learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', learning_rate)

        optimizer = tf.train.MomentumOptimizer(
            learning_rate=learning_rate,
            momentum=_MOMENTUM)
    elif opt_type == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
        global_step = tf.train.get_or_create_global_step()
    else:
        raise ValueError("opt_type must be 'momentum' or 'adam'")

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_step = optimizer.minimize(loss, global_step)
    #XXX original version:
    #train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
    #train_step = train_step.minimize(loss)

    with sess.as_default():
        if init_all:
            tf.global_variables_initializer().run()
        else:
            initialize_uninitialized_global_variables(sess)

        for epoch in range(args.nb_epochs):
            # Compute number of batches
            nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
            assert nb_batches * args.batch_size >= len(X_train)

            # Indices to shuffle training set
            index_shuf = list(range(len(X_train)))
            rng.shuffle(index_shuf)

            prev = time.time()
            for batch in range(nb_batches):

                # Compute batch start and end indices
                start, end = batch_indices(
                    batch, len(X_train), args.batch_size)

                # Perform one training step
                feed_dict = {x: X_train[index_shuf[start:end]],
                             y: Y_train[index_shuf[start:end]]}
                if feed is not None:
                    feed_dict.update(feed)
                if summary is None:
                    train_step.run(feed_dict=feed_dict)
                else:
                    summary_val, _ = sess.run([summary, train_step], feed_dict=feed_dict)
                    train_writer.add_summary(summary_val, batch + epoch * nb_batches)
            assert end >= len(X_train)  # Check that all examples were used
            cur = time.time()
            if verbose:
                _logger.info("Epoch " + str(epoch) + " took " +
                             str(cur - prev) + " seconds")
            if evaluate is not None:
                evaluate()

        if save:
            save_path = os.path.join(args.train_dir, args.filename)
            saver = tf.train.Saver()
            saver.save(sess, save_path)
            _logger.info("Completed model training and saved at: " +
                         str(save_path))
        else:
            _logger.info("Completed model training.")

    if not verbose:
        set_log_level(old_log_level, name=_logger.name)

    return True
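# --- Worked example (not part of the original source) ---
# With batch_size=128, the "momentum" branch above sets
# initial_learning_rate = 0.1 * 128 / 128 * 0.01 = 0.001 and decays it by 10x
# at epochs 100, 150 and 200 via tf.train.piecewise_constant:
#   epochs [0, 100)   -> 1e-3
#   epochs [100, 150) -> 1e-4
#   epochs [150, 200) -> 1e-5
#   epochs [200, ...) -> 1e-6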
Example 8
def model_train(sess, x_reg, x_seq, y, predictions, X_reg_train, X_seq_train, Y_train, 
	X_reg_val, X_seq_val, Y_val, predictions_adv=None, init_all=True, 
	verbose=True, feed=None, args=None):
	"""
	Train a TF graph
	:param sess: TF session to use when training the graph
	:param x_reg: input placeholder for the regular inputs
	:param x_seq: input placeholder for the sequence inputs
	:param y: output placeholder (for labels)
	:param predictions: model output predictions
	:param X_reg_train: numpy array with regular training inputs
	:param X_seq_train: numpy array with sequence training inputs
	:param Y_train: numpy array with training outputs
	:param X_reg_val, X_seq_val, Y_val: numpy arrays with validation data
	:param predictions_adv: if set with the adversarial example tensor,
							will run adversarial training
	:param init_all: (boolean) If set to true, all TF variables in the session
					 are (re)initialized, otherwise only previously
					 uninitialized variables are initialized before training.
	:param verbose: (boolean) all print statements disabled when set to False.
	:param feed: An optional dictionary that is appended to the feeding
				 dictionary before the session runs. Can be used to feed
				 the learning phase of a Keras model for instance.
	:param args: dict or argparse `Namespace` object.
				 Should contain `nb_epochs`, `learning_rate`,
				 `batch_size`
				 Should also contain 'train_dir' and 'filename';
				 a checkpoint is saved after every epoch
	:return: True if model trained
	"""
	args = _FlagsWrapper(args or {})

	# Check that necessary arguments were given (see doc above)
	assert args.nb_epochs, "Number of epochs was not given in args dict"
	assert args.learning_rate, "Learning rate was not given in args dict"
	assert args.batch_size, "Batch size was not given in args dict"

	# Define loss
	loss = model_loss(y, predictions)
	if predictions_adv is not None:
		loss = (loss + model_loss(y, predictions_adv)) / 2

	train_step = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
	train_step = train_step.minimize(loss)

	with sess.as_default():
		if init_all:
			tf.global_variables_initializer().run()
		else:
			initialize_uninitialized_global_variables(sess)
		
		for epoch in six.moves.xrange(args.nb_epochs):
			if verbose:
				print("Epoch " + str(epoch))

			# Compute number of batches
			nb_batches = int(math.ceil(float(len(Y_train)) / args.batch_size))
			assert nb_batches * args.batch_size >= len(Y_train)

			prev = time.time()
			for batch in range(nb_batches):

				# Compute batch start and end indices
				start, end = batch_indices(batch, len(Y_train), args.batch_size)

				# Perform one training step
				feed_dict = {
					x_reg: X_reg_train[start:end], 
					x_seq: X_seq_train[start:end], 
					y: Y_train[start:end],
					K.learning_phase(): 1
				}

				if feed is not None:
					feed_dict.update(feed)

				train_step.run(feed_dict=feed_dict)
			assert end >= len(Y_train)  # Check that all examples were used
			cur = time.time()
			if verbose:
				print("\tEpoch took " + str(cur - prev) + " seconds")
			prev = cur

			eval_params = {'batch_size': 100}
			eval_accuracy=model_eval(sess, x_reg, x_seq, y, predictions, X_reg_val, X_seq_val, Y_val, args=eval_params)
			save_path = os.path.join(args.train_dir, args.filename)
			save_path = "%s.%s_%.04f.ckpt"%(save_path, epoch, eval_accuracy)
			saver = tf.train.Saver()
			saver.save(sess, save_path)
			print("Completed model training and saved at: " + str(save_path))


	return True
Example 9
def model_train(sess,
                x,
                y,
                predictions,
                X_train,
                Y_train,
                save=False,
                predictions_adv=None,
                evaluate=None,
                verbose=True,
                args=None):
    """
    Train a TF graph
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param predictions: model output predictions
    :param X_train: numpy array with training inputs
    :param Y_train: numpy array with training outputs
    :param save: boolean controlling the save operation
    :param predictions_adv: if set with the adversarial example tensor,
                            will run adversarial training
    :param args: dict or argparse `Namespace` object.
                 Should contain `nb_epochs`, `learning_rate`,
                 `batch_size`
                 If save is True, should also contain 'train_dir'
                 and 'filename'
    :return: True if model trained
    """
    args = _FlagsWrapper(args or {})

    # Check that necessary arguments were given (see doc above)
    assert args.nb_epochs, "Number of epochs was not given in args dict"
    assert args.learning_rate, "Learning rate was not given in args dict"
    assert args.batch_size, "Batch size was not given in args dict"

    if save:
        assert args.train_dir, "Directory for save was not given in args dict"
        assert args.filename, "Filename for save was not given in args dict"

    # Define loss
    loss = model_loss(y, predictions)
    if predictions_adv is not None:
        loss = (loss + model_loss(y, predictions_adv)) / 2

    train_step = tf.train.AdadeltaOptimizer(learning_rate=args.learning_rate,
                                            rho=0.95,
                                            epsilon=1e-08).minimize(loss)

    with sess.as_default():
        if hasattr(tf, "global_variables_initializer"):
            tf.global_variables_initializer().run()
        else:
            warnings.warn("Update your copy of tensorflow; future versions of "
                          "cleverhans may drop support for this version.")
            sess.run(tf.initialize_all_variables())

        for epoch in six.moves.xrange(args.nb_epochs):
            if verbose:
                print("Epoch " + str(epoch))

            # Compute number of batches
            nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
            assert nb_batches * args.batch_size >= len(X_train)

            prev = time.time()
            for batch in range(nb_batches):

                # Compute batch start and end indices
                start, end = batch_indices(batch, len(X_train),
                                           args.batch_size)

                # Perform one training step
                train_step.run(
                    feed_dict={
                        x: X_train[start:end],
                        y: Y_train[start:end],
                        keras.backend.learning_phase(): 1
                    })
            assert end >= len(X_train)  # Check that all examples were used
            cur = time.time()
            if verbose:
                print("\tEpoch took " + str(cur - prev) + " seconds")
            prev = cur
            if evaluate is not None:
                evaluate()

        if save:
            save_path = os.path.join(args.train_dir, args.filename)
            saver = tf.train.Saver()
            saver.save(sess, save_path)
            print("Completed model training and model saved at:" +
                  str(save_path))
        else:
            print("Completed model training.")

    return True
Example 10
def gan_train_v2(
    sess,
    x,
    y,
    predictions,
    X_train,
    Y_train,
    loss_func=None,
    optimizer=None,
    predictions_adv=None,
    init_all=True,
    evaluate=None,
    feed=None,
    args=None,
    rng=None,
    var_list=None,
):
    """
    Train a TF graph
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param predictions: model output predictions [class_pred, source_pred]
    :param X_train: numpy array with training inputs
    :param Y_train: numpy array with training outputs
    :param loss_func: list of loss functions [clf_loss, dic_loss]
    :param optimizer: tensorflow optimizer
    :param predictions_adv: if set with the adversarial example tensor,
                            will run adversarial training [adv_class_pred, adv_source_pred]
    :param init_all: (boolean) If set to true, all TF variables in the session
                     are (re)initialized, otherwise only previously
                     uninitialized variables are initialized before training.
    :param evaluate: function that is run after each training epoch
                     (typically to display the test/validation accuracy).
    :param feed: An optional dictionary that is appended to the feeding
                 dictionary before the session runs. Can be used to feed
                 the learning phase of a Keras model for instance.
    :param args: dict or argparse `Namespace` object.
                 Should contain `nb_epochs`, `batch_size`, `trade_off`
                 (weight of the discriminator term in the classifier loss),
                 `inner_epochs` (number of discriminator batches per
                 classifier batch) and `pretrain_epochs`; may also contain
                 `weight_decay` and `global_step`.
    :param rng: Instance of numpy.random.RandomState
    :param var_list: Optional list of parameters to train.
    :return: True if model trained
    """
    args = _ArgsWrapper(args or {})

    # Check that necessary inputs were given
    assert len(predictions) == 2, \
        "predictions should be [class_pred, source_pred]"
    assert len(predictions_adv) == 2, \
        "predictions_adv should be [adv_class_pred, adv_source_pred]"
    assert len(var_list) == 2, \
        "var_list should be [classifier variables, discriminator variables]"

    # Check that necessary arguments were given (see doc above)
    assert args.nb_epochs, "Number of epochs was not given in args dict"
    assert args.batch_size, "Batch size was not given in args dict"
    assert args.trade_off, "Balance parameter was not given in args dict"
    assert args.inner_epochs, "Number of inner epochs was not given in args dict"

    # Check that necessary operators were given
    assert len(loss_func) == 2, "loss_func should be [clf_loss, dic_loss]"
    assert len(optimizer) == 2, "optimizer should be a pair of optimizers"

    if rng is None:
        rng = np.random.RandomState()

    # Define discriminator loss
    adv_source_loss = loss_func[1](tf.ones(shape=[tf.shape(y)[0], 1]),
                                   predictions_adv[1])
    dic_loss = (loss_func[1](tf.zeros(shape=[tf.shape(y)[0], 1]),
                             predictions[1]) + adv_source_loss) / 2

    # Define classifier loss
    class_loss = loss_func[0](y, predictions[0])
    pre_loss = (class_loss + loss_func[0](y, predictions_adv[0])) / 2
    clf_loss = pre_loss - args.trade_off * adv_source_loss

    # Add weight decay
    if args.weight_decay is not None:
        weights = []
        for var in tf.trainable_variables():
            if var.op.name.find('clf') > 0 and var.op.name.find('kernel') > 0:
                weights.append(tf.nn.l2_loss(var))
        weight_loss = args.weight_decay * tf.add_n(weights)
        pre_loss += weight_loss
        clf_loss += weight_loss

    # Define training operation
    if args.global_step is not None:
        pre_step = optimizer[0].minimize(pre_loss,
                                         var_list=var_list[0],
                                         global_step=args.global_step)
        clf_step = optimizer[0].minimize(clf_loss,
                                         var_list=var_list[0],
                                         global_step=args.global_step)
    else:
        pre_step = optimizer[0].minimize(pre_loss, var_list=var_list[0])
        clf_step = optimizer[0].minimize(clf_loss, var_list=var_list[0])
    dic_step = optimizer[1].minimize(dic_loss, var_list=var_list[1])

    with sess.as_default():
        if hasattr(tf, "global_variables_initializer"):
            if init_all:
                tf.global_variables_initializer().run()
            else:
                initialize_uninitialized_global_variables(sess)
        else:
            warnings.warn("Update your copy of tensorflow; future versions of "
                          "CleverHans may drop support for this version.")
            sess.run(tf.initialize_all_variables())

        for epoch in xrange(args.nb_epochs):
            # Compute number of batches
            nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
            assert nb_batches * args.batch_size >= len(X_train)

            # Indices to shuffle training set
            index_shuf = list(range(len(X_train)))
            rng.shuffle(index_shuf)

            prev = time.time()

            if epoch < args.pretrain_epochs:
                # Pre-train Classifier
                _logger.info("Pre-train Epoch")
                for batch in range(nb_batches):
                    # Train Classifier
                    # Compute batch start and end indices
                    start, end = batch_indices(batch, len(X_train),
                                               args.batch_size)
                    # Perform one training step
                    feed_dict = {
                        x: X_train[index_shuf[start:end]],
                        y: Y_train[index_shuf[start:end]]
                    }
                    if feed is not None:
                        feed_dict.update(feed)
                    pre_step.run(feed_dict=feed_dict)
            else:
                # GAN Training
                _logger.info("GAN-train Epoch")
                for batch in range(nb_batches):
                    # Train Discriminator
                    inner_batches = np.random.choice(nb_batches,
                                                     args.inner_epochs)
                    for inner_batch in inner_batches:
                        # Compute batch start and end indices
                        inner_start, inner_end = batch_indices(
                            inner_batch, len(X_train), args.batch_size)
                        # Perform one training step
                        feed_dict = {
                            x: X_train[index_shuf[inner_start:inner_end]],
                            y: Y_train[index_shuf[inner_start:inner_end]]
                        }
                        if feed is not None:
                            feed_dict.update(feed)
                        dic_step.run(feed_dict=feed_dict)
                    # Train Classifier
                    # Compute batch start and end indices
                    start, end = batch_indices(batch, len(X_train),
                                               args.batch_size)
                    # Perform one training step
                    feed_dict = {
                        x: X_train[index_shuf[start:end]],
                        y: Y_train[index_shuf[start:end]]
                    }
                    if feed is not None:
                        feed_dict.update(feed)
                    # clf_step.run(feed_dict=feed_dict)  # replaced by the
                    # combined sess.run below, which also fetches the losses
                    _, cl, dl = sess.run(
                        fetches=[clf_step, pre_loss, dic_loss],
                        feed_dict=feed_dict)

            # Log the classifier/discriminator losses (only computed during
            # the GAN-training epochs, so skip the pre-training phase)
            if epoch >= args.pretrain_epochs:
                _logger.info(
                    "Epoch %d - Classifier Loss %4f - Discriminator Loss %4f " %
                    (epoch, cl, dl))

            # Check that all examples were used
            assert end >= len(X_train)
            cur = time.time()
            _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) +
                         " seconds")

            if evaluate is not None:
                evaluate()

        _logger.info("Completed model training.")

    return True
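# --- Usage sketch (not part of the original source) ---
# gan_train_v2 expects paired lists and an args dict with the extra keys its
# asserts require. The prediction tensors, loss functions, variable lists and
# optimizers below are assumed to come from the surrounding GAN setup.
gan_args = {
    'nb_epochs': 20,
    'batch_size': 128,
    'trade_off': 0.5,       # weight of the discriminator term in clf_loss
    'inner_epochs': 1,      # discriminator batches per classifier batch
    'pretrain_epochs': 5,   # classifier-only epochs before GAN training
}
gan_train_v2(sess, x, y,
             predictions=[class_pred, source_pred],
             X_train=X_train, Y_train=Y_train,
             loss_func=[clf_loss_fn, dic_loss_fn],
             optimizer=[tf.train.AdamOptimizer(1e-3),
                        tf.train.AdamOptimizer(1e-3)],
             predictions_adv=[adv_class_pred, adv_source_pred],
             args=gan_args,
             var_list=[clf_vars, dic_vars])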
Example 11
for epoch in range(args.epochs):
    # Compute number of batches
    num_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
    assert num_batches * args.batch_size >= len(X_train)

    # Indices to shuffle training set
    index_shuf = list(range(len(X_train)))
    rng.shuffle(index_shuf)

    if args.verbose:
        print("verbose: in epoch for loop, about to do batch loop")

    for batch in range(num_batches):

        # Compute batch start and end indices
        start, end = batch_indices(batch, len(X_train), args.batch_size)

        # Perform one training step
        feed_dict = {
            x: X_train[index_shuf[start:end]],
            y: Y_train[index_shuf[start:end]]
        }
        sess.run(train_step, feed_dict=feed_dict)

    feed_dict = {x: X_train, y: Y_train}
    acc_val = sess.run(acc_op, feed_dict=feed_dict)
    logger.info(f"Epoch: {epoch}, Train Acc: {acc_val:.5f}")

if args.verbose:
    print("verbose: epoch loop done")