history = model.fit(x=train_ds,
                                        epochs=num_epoch,
                                        steps_per_epoch=steps,
                                        validation_data=val_ds,
                                        validation_steps=steps,
                                        callbacks=callbacks_for_fit)
                self.assertIsInstance(history, keras.callbacks.History)

        threads = self.run_multiple_tasks_in_threads(
            _independent_worker_fn,
            cluster_spec,
            verification_callback=verification_callback)

        threads_to_join = []
        for task_type, ts in threads.items():
            # This test can finish once the worker threads complete, and thus
            # the ps threads don't need to be joined.
            if task_type == 'ps':
                continue
            threads_to_join.extend(ts)
        self.join_independent_workers(threads_to_join)
        verification_callback.verify(self)


if __name__ == '__main__':
    # Enable manual variable initialization to make sure variables are initialized
    # by `init_restore_or_wait_for_variables`.
    backend.manual_variable_initialization(True)
    # Patch sys.exit with os._exit so worker threads terminate the process
    # immediately, skipping interpreter cleanup (atexit handlers, thread joins)
    # that can hang multi-worker tests.
    with test.mock.patch.object(sys, 'exit', os._exit):
        test.main()
Пример #2
0
              x=train_ds,
              epochs=num_epoch,
              steps_per_epoch=steps,
              validation_data=val_ds,
              validation_steps=steps,
              callbacks=callbacks_for_fit)
        self.assertIsInstance(history, keras.callbacks.History)

    threads = self.run_multiple_tasks_in_threads(
        _independent_worker_fn,
        cluster_spec,
        verification_callback=verification_callback)

    threads_to_join = []
    for task_type, ts in threads.items():
      # This test can finish once the worker threads complete, and thus
      # the ps threads don't need to be joined.
      if task_type == 'ps':
        continue
      threads_to_join.extend(ts)
    self.join_independent_workers(threads_to_join)
    verification_callback.verify(self)


if __name__ == '__main__':
  # Enable manual variable initialization to make sure variables are initialized
  # by `init_restore_or_wait_for_variables`.
  backend.manual_variable_initialization(True)
  # Patch sys.exit with os._exit so worker threads terminate the process
  # immediately, skipping interpreter cleanup (atexit handlers, thread joins)
  # that can hang multi-worker tests.
  with test.mock.patch.object(sys, 'exit', os._exit):
    test.main()
Пример #3
0
def main(argv=None):
    """Train MNIST CNNs with and without Wasserstein adversarial training.

    Optionally trains a baseline model on clean MNIST data (skipped when
    ``FLAGS.skip_clean_train`` is set), then trains a second model against
    Wasserstein adversarial examples. Both models are evaluated on clean
    and adversarial test data and saved under ``FLAGS.train_dir``.

    Args:
        argv: Unused; present for the conventional ``tf.app.run`` entry-point
            signature.

    NOTE(review): relies on module-level names not visible in this excerpt:
    `FLAGS`, `train_params`, `eval_params`, `data_mnist`, `cnn_model`,
    `model_train`, `model_eval`, `WassersteinRobustMethod` — confirm they
    are defined/imported elsewhere in this file.
    """

    # Put Keras in training mode while the graph is built (affects layers
    # such as dropout / batch-norm).
    keras.backend.set_learning_phase(1)
    manual_variable_initialization(True)

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)

    # Get MNIST data; labels are expected to be one-hot over 10 classes.
    X_train, Y_train, X_test, Y_test = data_mnist()
    assert Y_train.shape[1] == 10.
    # Label smoothing: squeeze one-hot targets away from exact 0/1.
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)

    # Define input TF placeholders: 28x28x1 grayscale images, 10-way labels.
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))
    # Attack parameters forwarded to WassersteinRobustMethod.generate.
    wrm_params = {
        'eps': 1.3,
        'ord': 2,
        'y': y,
        'steps': 15,
        'stop_gradient': FLAGS.stop_gradient
    }
    print('wrm params', wrm_params)

    if not FLAGS.skip_clean_train:
        print("Train on clean data")
        # Define TF model graph
        model = cnn_model(activation='elu')
        predictions = model(x)
        # Adversarial-example generator attached to the clean model.
        wrm = WassersteinRobustMethod(model, sess=sess)
        predictions_adv_wrm = model(wrm.generate(x, **wrm_params))

        def evaluate():
            # Evaluate the accuracy of the MNIST model on legitimate test examples
            accuracy = model_eval(sess,
                                  x,
                                  y,
                                  predictions,
                                  X_test,
                                  Y_test,
                                  args=eval_params)
            print('Test accuracy on legitimate test examples: %0.4f' %
                  accuracy)

            # Accuracy of the model on Wasserstein adversarial examples
            accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_wrm, X_test, \
                                           Y_test, args=eval_params)
            print('Test accuracy on Wasserstein examples: %0.4f\n' %
                  accuracy_adv_wass)

        # Train the baseline model on clean data, evaluating via `evaluate`,
        # then save it under the ERM filename.
        model_train(sess, x, y, predictions, X_train, Y_train, evaluate=evaluate, \
                    args=train_params, save=False)
        model.save(FLAGS.train_dir + '/' + FLAGS.filename_erm)

        print('')
    print("Repeating the process, using Wasserstein adversarial training")
    # Redefine TF model graph
    model_adv = cnn_model(activation='elu')
    predictions_adv = model_adv(x)
    wrm2 = WassersteinRobustMethod(model_adv, sess=sess)
    predictions_adv_adv_wrm = model_adv(wrm2.generate(x, **wrm_params))

    def evaluate_adv():
        # Accuracy of adversarially trained model on legitimate test inputs
        accuracy = model_eval(sess,
                              x,
                              y,
                              predictions_adv,
                              X_test,
                              Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate test examples: %0.4f' % accuracy)

        # Accuracy of the adversarially trained model on Wasserstein adversarial examples
        accuracy_adv_wass = model_eval(sess, x, y, predictions_adv_adv_wrm, \
                                       X_test, Y_test, args=eval_params)
        print('Test accuracy on Wasserstein examples: %0.4f\n' %
              accuracy_adv_wass)

    # Adversarial training run.
    # NOTE(review): both the 4th positional argument and `predictions_adv=` are
    # the adversarial predictions here, so the training loss appears to use no
    # clean-data term — confirm this is intentional and not a slip for
    # `predictions_adv` (the clean-graph predictions of model_adv).
    model_train(sess, x, y, predictions_adv_adv_wrm, X_train, Y_train, \
                predictions_adv=predictions_adv_adv_wrm, evaluate=evaluate_adv, \
                args=train_params, save=False)
    model_adv.save(FLAGS.train_dir + '/' + FLAGS.filename_wrm)
Пример #4
0
import tensorflow as tf
import random, pygame, signal, time
from ple.games.pixelcopter import Pixelcopter
from ple import PLE
from pygame.constants import K_w, K_s
import numpy as np
from collections import deque
from tensorflow.python.keras import backend as K

from collections import deque
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.optimizers import Adam

from tensorflow.python.keras.backend import manual_variable_initialization
manual_variable_initialization(True)


class DQNAgent:
    def __init__(self, state_size, action_size):
        """Set up the agent for a given observation/action space size.

        Records the environment dimensions and a fixed learning rate, then
        constructs the Q-network via `_build_model`.
        """
        # Hyperparameter: fixed optimizer learning rate.
        self.learning_rate = 0.0004
        # Environment dimensions must be set before building the network,
        # since _build_model reads self.state_size for the input layer.
        self.action_size = action_size
        self.state_size = state_size
        self.model = self._build_model()

    def _build_model(self):
        # Ensure to use same model architecture as that was trained and loaded
        model = Sequential()
        model.add(Dense(32, input_dim=self.state_size, activation='relu'))
        model.add(Dense(48, activation='relu'))
        model.add(Dense(32, activation='relu'))