def create_model(model_type, model_tree, freeze, dataset_dir, model_file, img_loader):
    """Assemble a (model, cost, optimizer) triple for the requested architecture.

    Args:
        model_type: architecture name, one of "alexnet" or "vgg".
        model_tree: when truthy, build a taxonomy-branched model instead of a
            flat classifier.
        freeze: number of trailing layers to leave trainable; when > 0,
            pretrained weights are copied into the earlier layers and those
            layers are frozen.
        dataset_dir: dataset root, used to locate the taxonomy pickle.
        model_file: path of the pretrained parameter file (1000-way ImageNet head).
        img_loader: data iterator providing `nclass` and initialization shapes.

    Returns:
        Tuple of (model, cost, opt).

    Raises:
        NotImplementedError: for an unrecognized `model_type`.
    """
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # dispatch table: architecture name -> (optimizer factory, layer factory)
    factories = {
        "alexnet": (create_alexnet_opt, create_alexnet_layers),
        "vgg": (create_vgg_opt, create_vgg_layers),
    }
    if model_type not in factories:
        raise NotImplementedError(model_type + " has not been implemented")
    opt_factory, layer_func = factories[model_type]
    opt = opt_factory()

    if model_tree:
        taxonomy = ClassTaxonomy("Aves", "taxonomy_dict.p", dataset_dir)
        layers = created_branched(layer_func, taxonomy, img_loader)
        model = TaxonomicBranchModel(layers=layers)
    else:
        layers = layer_func(img_loader.nclass)
        model = Model(layers=layers)

    if freeze > 0:
        # load the pretrained 1000-way network so its weights can be copied in
        pretrained = Model(layers=layer_func(1000))
        pretrained.load_params(model_file)
        model.initialize(img_loader)
        model.initialized = False
        saved_lto = pretrained.layers.layers_to_optimize
        model_lto = model.layers.layers_to_optimize
        # each frozen layer contributes a weight and a bias entry, hence * 2
        keep_length = len(saved_lto) - freeze * 2

        # NOTE: the range-then-slice form is kept deliberately — for a
        # negative keep_length it behaves differently from range(keep_length).
        for idx in range(len(saved_lto))[:keep_length]:
            model_lto[idx].W[:] = saved_lto[idx].W
            model_lto[idx].optimize = False
        for idx in range(len(model_lto))[keep_length:]:
            model_lto[idx].optimize = True

        model.layers = FreezeSequential(layers)
        model.layers_to_optimize = model.layers.layers_to_optimize

    return model, cost, opt
Example #2
0
File: inference.py  Project: Jokeren/neon
        hidden_size,
        init_glorot,
        activation=Tanh(),
        gate_activation=Logistic(),
        reset_cells=True),
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(nclass, init_glorot, bias=init_glorot, activation=Softmax())
]

# load the weights
print("Initialized the models - ")
model_new = Model(layers=layers)
print("Loading the weights from {0}".format(args.model_weights))

model_new.load_params(args.model_weights)
model_new.initialize(dataset=(sentence_length, batch_size))

# setup buffers before accepting reviews
# xdev lives on the backend device, xbuf is its host-side staging array
xdev = be.zeros((sentence_length, 1), dtype=np.int32)  # bsz is 1, feature size
xbuf = np.zeros((1, sentence_length), dtype=np.int32)
# special token ids — presumably the IMDB-style preprocessing convention
# (pad=0, start=1, out-of-vocab=2, real words offset by 3); confirm against
# the vocabulary file
oov = 2
start = 1
index_from = 3
pad_char = 0
vocab, rev_vocab = pickle.load(open(args.vocab_file, 'rb'))

while True:
    line = input('Enter a Review from testData.tsv file \n')

    # clean the input
    MergeMultistream(layers=[image_path, sent_path], merge="recurrent"),
    Dropout(keep=0.5),
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=True),
    Affine(train_set.vocab_size, init, bias=init2, activation=Softmax())
]

# masked cost so padded caption timesteps do not contribute to the loss
cost = GeneralizedCostMask(costfunc=CrossEntropyMulti(usebits=True))

# configure callbacks
# NOTE(review): "~" is not expanded by all file APIs — confirm the
# serialization path applies os.path.expanduser before writing.
checkpoint_model_path = "~/image_caption2.pickle"
if args.callback_args['save_path'] is None:
    args.callback_args['save_path'] = checkpoint_model_path

if args.callback_args['serialize'] is None:
    args.callback_args['serialize'] = 1  # checkpoint every epoch

model = Model(layers=layers)

callbacks = Callbacks(model, train_set, **args.callback_args)

opt = RMSProp(decay_rate=0.997, learning_rate=0.0005, epsilon=1e-8, gradient_clip_value=1)

# train model
model.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

# load model (if exited) and evaluate bleu score on test set
model.load_params(checkpoint_model_path)
test_set = ImageCaptionTest(path=data_path)
sents, targets = test_set.predict(model)
test_set.bleu_score(sents, targets)
Example #4
0
# LeNet-style stack: two conv/pool pairs followed by two affine layers,
# ending in a 10-way softmax classifier
layers = [
    Conv((5, 5, nfilters[0]), bias=Constant(0.1), padding=0, **common_params),
    Pooling(2, strides=2, padding=0),
    Conv((5, 5, nfilters[1]), bias=Constant(0.1), padding=0, **common_params),
    Pooling(2, strides=2, padding=0),
    Affine(nout=nfilters[2], bias=Constant(0.1), **common_params),
    Affine(nout=10,
           bias=Constant(0.1),
           activation=Softmax(),
           init=Gaussian(scale=0.01))
]
model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyMulti())

# initialize buffers, then warm-start from pretrained MNIST weights
# (load_states=False: parameters only, no optimizer state)
model.initialize(train_set, cost)
model.load_params('models/mnist/mnist_cnn.pkl', load_states=False)

# define optimizer
opt_w = GradientDescentMomentum(learning_rate=0.01,
                                momentum_coef=0.9,
                                wdecay=0.0005)
opt_b = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)
# bias layers get a separate optimizer without weight decay
opt = MultiOptimizer({'default': opt_w, 'Bias': opt_b}, name='multiopt')

# configure callbacks
callbacks = Callbacks(model,
                      eval_set=valid_set,
                      metric=Misclassification(),
                      **args.callback_args)
callbacks.add_callback(
    TrainByStageCallback(model, valid_set, Misclassification(),
Example #5
0
class DeepQNetwork:
    """Deep Q-Network (DQN) built on the Neon backend.

    Keeps an online model plus, when ``args.target_steps`` is set, a target
    model whose parameters are cloned from the online model every
    ``target_steps`` weight updates. Input and target tensors are allocated
    once in ``__init__`` and reused for every minibatch.
    """

    def __init__(self, num_actions, args):
        """Create the backend, reusable tensors, models, cost and optimizer.

        Args:
            num_actions: number of network outputs (one Q-value per action).
            args: argparse namespace carrying backend and training settings.

        Raises:
            ValueError: if ``args.optimizer`` names an unsupported optimizer.
        """
        # remember parameters
        self.num_actions = num_actions
        self.batch_size = args.batch_size
        self.discount_rate = args.discount_rate
        self.history_length = args.history_length
        self.screen_dim = (args.screen_height, args.screen_width)
        self.clip_error = args.clip_error
        self.min_reward = args.min_reward
        self.max_reward = args.max_reward
        self.batch_norm = args.batch_norm

        # create Neon backend
        self.be = gen_backend(backend=args.backend,
                              batch_size=args.batch_size,
                              rng_seed=args.random_seed,
                              device_id=args.device_id,
                              datatype=np.dtype(args.datatype).type,
                              stochastic_round=args.stochastic_round)

        # prepare tensors once and reuse them
        self.input_shape = (self.history_length, ) + self.screen_dim + (
            self.batch_size, )
        self.input = self.be.empty(self.input_shape)
        self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
        self.targets = self.be.empty((self.num_actions, self.batch_size))

        # create model
        layers = self._createLayers(num_actions)
        self.model = Model(layers=layers)
        self.cost = GeneralizedCost(costfunc=SumSquared())
        # Bug fix: disable layer parallelism
        for l in self.model.layers.layers:
            l.parallelism = 'Disabled'
        self.model.initialize(self.input_shape[:-1], self.cost)
        if args.optimizer == 'rmsprop':
            self.optimizer = RMSProp(learning_rate=args.learning_rate,
                                     decay_rate=args.decay_rate,
                                     stochastic_round=args.stochastic_round)
        elif args.optimizer == 'adam':
            self.optimizer = Adam(learning_rate=args.learning_rate,
                                  stochastic_round=args.stochastic_round)
        elif args.optimizer == 'adadelta':
            self.optimizer = Adadelta(decay=args.decay_rate,
                                      stochastic_round=args.stochastic_round)
        else:
            # FIX: was `assert false, "Unknown optimizer"`, which raised
            # NameError ("false" is undefined in Python) and would be
            # stripped under `python -O`; fail with an explicit error.
            raise ValueError("Unknown optimizer: %s" % args.optimizer)

        # create target model
        self.target_steps = args.target_steps
        self.train_iterations = 0
        if self.target_steps:
            self.target_model = Model(layers=self._createLayers(num_actions))
            # Bug fix: disable layer parallelism
            for l in self.target_model.layers.layers:
                l.parallelism = 'Disabled'
            self.target_model.initialize(self.input_shape[:-1])
            self.save_weights_prefix = args.save_weights_prefix
        else:
            # without periodic cloning, the target network is the online one
            self.target_model = self.model

        self.callback = None

    def _createLayers(self, num_actions):
        """Return the DeepMind Nature DQN layer stack."""
        init_norm = Gaussian(loc=0.0, scale=0.01)
        layers = []
        # The first hidden layer convolves 32 filters of 8x8 with stride 4
        # with the input image and applies a rectifier nonlinearity.
        layers.append(
            Conv((8, 8, 32),
                 strides=4,
                 init=init_norm,
                 activation=Rectlin(),
                 batch_norm=self.batch_norm))
        # The second hidden layer convolves 64 filters of 4x4 with stride 2,
        # again followed by a rectifier nonlinearity.
        layers.append(
            Conv((4, 4, 64),
                 strides=2,
                 init=init_norm,
                 activation=Rectlin(),
                 batch_norm=self.batch_norm))
        # This is followed by a third convolutional layer that convolves 64
        # filters of 3x3 with stride 1 followed by a rectifier.
        layers.append(
            Conv((3, 3, 64),
                 strides=1,
                 init=init_norm,
                 activation=Rectlin(),
                 batch_norm=self.batch_norm))
        # The final hidden layer is fully-connected and consists of 512
        # rectifier units.
        layers.append(
            Affine(nout=512,
                   init=init_norm,
                   activation=Rectlin(),
                   batch_norm=self.batch_norm))
        # The output layer is a fully-connected linear layer with a single
        # output for each valid action.
        layers.append(Affine(nout=num_actions, init=init_norm))
        return layers

    def _setInput(self, states):
        """Copy a batch of states into the reused input tensor, scaled by 255."""
        # change order of axes to match what Neon expects
        states = np.transpose(states, axes=(1, 2, 3, 0))
        # copy() shouldn't be necessary here, but Neon doesn't work otherwise
        self.input.set(states.copy())
        # normalize network input between 0 and 1
        self.be.divide(self.input, 255, self.input)

    def train(self, minibatch, epoch):
        """Perform one DQN update step on a minibatch of transitions.

        Args:
            minibatch: tuple (prestates, actions, rewards, poststates,
                terminals) of equally-sized arrays.
            epoch: current epoch, forwarded to the optimizer schedule.
        """
        # expand components of minibatch
        prestates, actions, rewards, poststates, terminals = minibatch
        assert len(prestates.shape) == 4
        assert len(poststates.shape) == 4
        assert len(actions.shape) == 1
        assert len(rewards.shape) == 1
        assert len(terminals.shape) == 1
        assert prestates.shape == poststates.shape
        assert prestates.shape[0] == actions.shape[0] == rewards.shape[
            0] == poststates.shape[0] == terminals.shape[0]

        if self.target_steps and self.train_iterations % self.target_steps == 0:
            # have to serialize also states for batch normalization to work
            pdict = self.model.get_description(get_weights=True,
                                               keep_states=True)
            self.target_model.deserialize(pdict, load_states=True)

        # feed-forward pass for poststates to get Q-values
        self._setInput(poststates)
        postq = self.target_model.fprop(self.input, inference=True)
        assert postq.shape == (self.num_actions, self.batch_size)

        # calculate max Q-value for each poststate
        maxpostq = self.be.max(postq, axis=0).asnumpyarray()
        assert maxpostq.shape == (1, self.batch_size)

        # feed-forward pass for prestates
        self._setInput(prestates)
        preq = self.model.fprop(self.input, inference=False)
        assert preq.shape == (self.num_actions, self.batch_size)

        # make copy of prestate Q-values as targets
        targets = preq.asnumpyarray()

        # clip rewards between -1 and 1
        rewards = np.clip(rewards, self.min_reward, self.max_reward)

        # update Q-value targets for actions taken; terminal transitions get
        # the bare reward, others the bootstrapped Bellman target
        for i, action in enumerate(actions):
            if terminals[i]:
                targets[action, i] = float(rewards[i])
            else:
                targets[action, i] = float(
                    rewards[i]) + self.discount_rate * maxpostq[0, i]

        # copy targets to GPU memory
        self.targets.set(targets)

        # calculate errors
        deltas = self.cost.get_errors(preq, self.targets)
        assert deltas.shape == (self.num_actions, self.batch_size)
        #assert np.count_nonzero(deltas.asnumpyarray()) == 32

        # calculate cost, just in case
        cost = self.cost.get_cost(preq, self.targets)
        assert cost.shape == (1, 1)

        # clip errors
        if self.clip_error:
            self.be.clip(deltas, -self.clip_error, self.clip_error, out=deltas)

        # perform back-propagation of gradients
        self.model.bprop(deltas)

        # perform optimization
        self.optimizer.optimize(self.model.layers_to_optimize, epoch)

        # increase number of weight updates (needed for target clone interval)
        self.train_iterations += 1

        # calculate statistics
        if self.callback:
            self.callback.on_train(cost.asnumpyarray()[0, 0])

    def predict(self, states):
        """Return Q-values for a full minibatch with batch as first axis."""
        # minibatch is full size, because Neon doesn't let change the minibatch size
        assert states.shape == ((
            self.batch_size,
            self.history_length,
        ) + self.screen_dim)

        # calculate Q-values for the states
        self._setInput(states)
        qvalues = self.model.fprop(self.input, inference=True)
        assert qvalues.shape == (self.num_actions, self.batch_size)
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Q-values: " + str(qvalues.asnumpyarray()[:, 0]))

        # transpose the result, so that batch size is first dimension
        return qvalues.T.asnumpyarray()

    def load_weights(self, load_path):
        """Load model parameters from *load_path*."""
        self.model.load_params(load_path)

    def save_weights(self, save_path):
        """Save model parameters to *save_path*."""
        self.model.save_params(save_path)
Example #6
0
# scale LR by 0.1 every 20 epochs (this assumes batch_size = 256)
weight_sched = Schedule(20, 0.1)
opt_gdm = GradientDescentMomentum(0.01,
                                  0.9,
                                  wdecay=0.0005,
                                  schedule=weight_sched)
# biases use a higher learning rate and no weight decay
opt_biases = GradientDescentMomentum(0.02, 0.9, schedule=weight_sched)
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

# configure callbacks
valmetric = TopKMisclassification(k=5)
callbacks = Callbacks(model,
                      eval_set=test,
                      metric=valmetric,
                      **args.callback_args)

if args.model_file is not None:
    model.load_params(args.model_file)
if not args.test_only:
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    model.fit(train,
              optimizer=opt,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)

# evaluate: mets holds [logloss, top-1 misclass, top-5 misclass]
mets = model.eval(test, metric=valmetric)
# FIX: converted Python 2 print statements to print() calls so the block is
# valid Python 3 and consistent with the rest of the file (works on both).
print('Validation set metrics:')
print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (
    mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100))
Example #7
0
class DeepQNetwork:
    """Deep Q-Network (DQN) on the Neon backend (CPU-friendly variant).

    Same structure as the DeepMind Nature DQN: an online model, an optional
    target model cloned every ``target_steps`` updates, and input/target
    tensors allocated once and reused for every minibatch.
    """

    def __init__(self, num_actions, args):
        """Create the backend, reusable tensors, models, cost and optimizer.

        Args:
            num_actions: number of network outputs (one Q-value per action).
            args: argparse namespace carrying backend and training settings.

        Raises:
            ValueError: if ``args.optimizer`` names an unsupported optimizer.
        """
        # remember parameters
        self.num_actions = num_actions
        self.batch_size = args.batch_size
        self.discount_rate = args.discount_rate
        self.history_length = args.history_length
        self.screen_dim = (args.screen_height, args.screen_width)
        self.clip_error = args.clip_error
        self.min_reward = args.min_reward
        self.max_reward = args.max_reward
        self.batch_norm = args.batch_norm

        # create Neon backend
        self.be = gen_backend(backend=args.backend,
                              batch_size=args.batch_size,
                              rng_seed=args.random_seed,
                              device_id=args.device_id,
                              datatype=np.dtype(args.datatype).type,
                              stochastic_round=args.stochastic_round)

        # prepare tensors once and reuse them
        self.input_shape = (self.history_length,) + self.screen_dim + (self.batch_size,)
        self.input = self.be.empty(self.input_shape)
        self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
        self.targets = self.be.empty((self.num_actions, self.batch_size))

        # create model
        layers = self._createLayers(num_actions)
        self.model = Model(layers=layers)
        self.cost = GeneralizedCost(costfunc=SumSquared())
        # Bug fix: disable layer parallelism
        for l in self.model.layers.layers:
            l.parallelism = 'Disabled'
        self.model.initialize(self.input_shape[:-1], self.cost)
        if args.optimizer == 'rmsprop':
            self.optimizer = RMSProp(learning_rate=args.learning_rate,
                                     decay_rate=args.decay_rate,
                                     stochastic_round=args.stochastic_round)
        elif args.optimizer == 'adam':
            self.optimizer = Adam(learning_rate=args.learning_rate,
                                  stochastic_round=args.stochastic_round)
        elif args.optimizer == 'adadelta':
            self.optimizer = Adadelta(decay=args.decay_rate,
                                      stochastic_round=args.stochastic_round)
        else:
            # FIX: was `assert false, "Unknown optimizer"`, which raised
            # NameError ("false" is undefined in Python) and would be
            # stripped under `python -O`; fail with an explicit error.
            raise ValueError("Unknown optimizer: %s" % args.optimizer)

        # create target model
        self.target_steps = args.target_steps
        self.train_iterations = 0
        if self.target_steps:
            self.target_model = Model(layers=self._createLayers(num_actions))
            # Bug fix: disable layer parallelism
            for l in self.target_model.layers.layers:
                l.parallelism = 'Disabled'
            self.target_model.initialize(self.input_shape[:-1])
            self.save_weights_prefix = args.save_weights_prefix
        else:
            # without periodic cloning, the target network is the online one
            self.target_model = self.model

        self.callback = None

    def _createLayers(self, num_actions):
        """Return the DeepMind Nature DQN layer stack."""
        init_norm = Gaussian(loc=0.0, scale=0.01)
        layers = []
        # The first hidden layer convolves 32 filters of 8x8 with stride 4
        # with the input image and applies a rectifier nonlinearity.
        layers.append(Conv((8, 8, 32), strides=4, init=init_norm,
                           activation=Rectlin(), batch_norm=self.batch_norm))
        # The second hidden layer convolves 64 filters of 4x4 with stride 2,
        # again followed by a rectifier nonlinearity.
        layers.append(Conv((4, 4, 64), strides=2, init=init_norm,
                           activation=Rectlin(), batch_norm=self.batch_norm))
        # This is followed by a third convolutional layer that convolves 64
        # filters of 3x3 with stride 1 followed by a rectifier.
        layers.append(Conv((3, 3, 64), strides=1, init=init_norm,
                           activation=Rectlin(), batch_norm=self.batch_norm))
        # The final hidden layer is fully-connected and consists of 512
        # rectifier units.
        layers.append(Affine(nout=512, init=init_norm, activation=Rectlin(),
                             batch_norm=self.batch_norm))
        # The output layer is a fully-connected linear layer with a single
        # output for each valid action.
        layers.append(Affine(nout=num_actions, init=init_norm))
        return layers

    def _setInput(self, states):
        """Copy a batch of states into the reused input tensor, scaled by 255."""
        # change order of axes to match what Neon expects
        states = np.transpose(states, axes=(1, 2, 3, 0))
        # copy() shouldn't be necessary here, but Neon doesn't work otherwise
        self.input.set(states.copy())
        # normalize network input between 0 and 1
        self.be.divide(self.input, 255, self.input)

    def train(self, minibatch, epoch):
        """Perform one DQN update step on a minibatch of transitions."""
        # expand components of minibatch
        prestates, actions, rewards, poststates, terminals = minibatch
        assert len(prestates.shape) == 4
        assert len(poststates.shape) == 4
        assert len(actions.shape) == 1
        assert len(rewards.shape) == 1
        assert len(terminals.shape) == 1
        assert prestates.shape == poststates.shape
        assert prestates.shape[0] == actions.shape[0] == rewards.shape[0] == poststates.shape[0] == terminals.shape[0]

        if self.target_steps and self.train_iterations % self.target_steps == 0:
            # have to serialize also states for batch normalization to work
            pdict = self.model.get_description(get_weights=True, keep_states=True)
            self.target_model.deserialize(pdict, load_states=True)

        # feed-forward pass for poststates to get Q-values
        self._setInput(poststates)
        postq = self.target_model.fprop(self.input, inference=True)
        assert postq.shape == (self.num_actions, self.batch_size)

        # calculate max Q-value for each poststate
        maxpostq = self.be.max(postq, axis=0).asnumpyarray()
        assert maxpostq.shape == (1, self.batch_size)

        # feed-forward pass for prestates
        self._setInput(prestates)
        preq = self.model.fprop(self.input, inference=False)
        assert preq.shape == (self.num_actions, self.batch_size)

        # make copy of prestate Q-values as targets
        # It seems neccessary for cpu backend.
        targets = preq.asnumpyarray().copy()

        # clip rewards between -1 and 1
        rewards = np.clip(rewards, self.min_reward, self.max_reward)

        # update Q-value targets for actions taken; terminal transitions get
        # the bare reward, others the bootstrapped Bellman target
        for i, action in enumerate(actions):
            if terminals[i]:
                targets[action, i] = float(rewards[i])
            else:
                targets[action, i] = float(rewards[i]) + self.discount_rate * maxpostq[0, i]

        # copy targets to GPU memory
        self.targets.set(targets)

        # calculate errors
        deltas = self.cost.get_errors(preq, self.targets)
        assert deltas.shape == (self.num_actions, self.batch_size)
        #assert np.count_nonzero(deltas.asnumpyarray()) == 32

        # calculate cost, just in case
        cost = self.cost.get_cost(preq, self.targets)
        assert cost.shape == (1, 1)

        # clip errors
        if self.clip_error:
            self.be.clip(deltas, -self.clip_error, self.clip_error, out=deltas)

        # perform back-propagation of gradients
        self.model.bprop(deltas)

        # perform optimization
        self.optimizer.optimize(self.model.layers_to_optimize, epoch)

        # increase number of weight updates (needed for target clone interval)
        self.train_iterations += 1

        # calculate statistics
        if self.callback:
            self.callback.on_train(cost[0, 0])

    def predict(self, states):
        """Return Q-values for a full minibatch with batch as first axis."""
        # minibatch is full size, because Neon doesn't let change the minibatch size
        assert states.shape == ((self.batch_size, self.history_length,) + self.screen_dim)

        # calculate Q-values for the states
        self._setInput(states)
        qvalues = self.model.fprop(self.input, inference=True)
        assert qvalues.shape == (self.num_actions, self.batch_size)
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Q-values: " + str(qvalues.asnumpyarray()[:, 0]))

        # transpose the result, so that batch size is first dimension
        return qvalues.T.asnumpyarray()

    def load_weights(self, load_path):
        """Load model parameters from *load_path*."""
        self.model.load_params(load_path)

    def save_weights(self, save_path):
        """Save model parameters to *save_path*."""
        self.model.save_params(save_path)
Example #8
0
# test-image location and the closed label set for the robot controller
image_dir = my_dir + "test/image/"
class_names = ["forward", "left", "right", "backward"]  # from ROBOT-C bot.c
nclasses = len(class_names)
be = gen_backend(backend='cpu', batch_size=1)  # NN backend
init_uni = Uniform(low=-0.1, high=0.1)  # Unnecessary NN weight initialization
bn = True  # enable NN batch normalization
# small conv net: two conv/pool pairs followed by two affine layers
layers = [
    Conv((3, 3, 16), init=init_uni, activation=Rectlin(), batch_norm=bn),
    Pooling((2, 2)),
    Conv((3, 3, 32), init=init_uni, activation=Rectlin(), batch_norm=bn),
    Pooling((2, 2)),
    Affine(nout=50, init=init_uni, activation=Rectlin(), batch_norm=bn),
    Affine(nout=nclasses, init=init_uni, activation=Softmax())
]
model = Model(layers=layers)
# parameters only, no optimizer state
model.load_params(param_file_name, load_states=False)

# Load images to classify
W = img_size
H = img_size
L = W * H * 3  # flattened length of one RGB image
size = H, W

def test_recognition(test_file_name):
    # Load image
    image = Image.open(test_file_name)
    image.show()
    print("Loaded " + test_file_name)

    # Convert image to sample
Example #9
0
class DQNNeon(Learner):
    """ This class is an implementation of the DQN network based on Neon.

    The modules that interact with the agent, the replay memory and the
    statistic calls are implemented here, taking the individual requirements
    of the Lasagne framework into account. The code is adapted from:
    https://github.com/tambetm/simple_dqn

    Attributes:
        input_shape (tuple[int]): Dimension of the network input.
        dummy_batch (numpy.ndarray): Dummy batche used to calculate Q-values for single states.
        batch_norm (bool): Indicates if normalization is wanted for a certain layer (default=False).
        be (neon.backends.nervanagpu.NervanaGPU): Describes the backend for the Neon implementation.
        input (neon.backends.nervanagpu.GPUTensor): Definition of network input shape.
        targets(neon.backends.nervanagpu.GPUTensor): Definition of network output shape.
        model (neon.models.model.Model): Generated Neon model.
        target_model (neon.models.model.Model): Generated target Neon model.
        cost_func (neon.layers.layer.GeneralizedCost): Cost function for model training.
        callback (Statistics): Hook for the statistics object to pass train and test information.

    Note:
        More attributes of this class are defined in the base class Learner.
    """

    def __init__(self, env, args, rng, name = "DQNNeon"):
        """ Initializes a network based on the Neon framework.

        Args:
            env (AtariEnv): The envirnoment in which the agent actuates.
            args (argparse.Namespace): All settings either with a default value or set via command line arguments.
            rng (mtrand.RandomState): initialized Mersenne Twister pseudo-random number generator.
            name (str): The name of the network object.

        Note:
            This function should always call the base class first to initialize
            the common values for the networks.
        """
        _logger.info("Initializing new object of type " + str(type(self).__name__))
        super(DQNNeon, self).__init__(env, args, rng, name)
        self.input_shape = (self.sequence_length,) + self.frame_dims + (self.batch_size,)
        self.dummy_batch = np.zeros((self.batch_size, self.sequence_length) + self.frame_dims, dtype=np.uint8)
        self.batch_norm = args.batch_norm

        self.be = gen_backend(
                backend = args.backend,
                batch_size = args.batch_size,
                rng_seed = args.random_seed,
                device_id = args.device_id,
                datatype = np.dtype(args.datatype).type,
                stochastic_round = args.stochastic_round)

        # prepare tensors once and reuse them
        self.input = self.be.empty(self.input_shape)
        self.input.lshape = self.input_shape # HACK: needed for convolutional networks
        self.targets = self.be.empty((self.output_shape, self.batch_size))

        # create model
        layers = self._create_layer()
        self.model = Model(layers = layers)
        self.cost_func = GeneralizedCost(costfunc = SumSquared())
        # Bug fix
        for l in self.model.layers.layers:
            l.parallelism = 'Disabled'
        self.model.initialize(self.input_shape[:-1], self.cost_func)

        self._set_optimizer()

        # FIX: was `if not self.args.load_weights == None:` — use an explicit
        # identity test against None (PEP 8) instead of negated equality.
        if self.args.load_weights is not None:
            self.load_weights(self.args.load_weights)

        # create target model
        if self.target_update_frequency:
            layers = self._create_layer()
            self.target_model = Model(layers)
            # Bug fix
            for l in self.target_model.layers.layers:
                l.parallelism = 'Disabled'
            self.target_model.initialize(self.input_shape[:-1])
        else:
            # without periodic cloning, the target network is the online one
            self.target_model = self.model

        self.callback = None
        _logger.debug("%s" % self)

    def _create_layer(self):
        """ Build a network consistent with the DeepMind Nature paper. """
        _logger.debug("Output shape = %d" % self.output_shape)
        # shared weight initializer for every layer
        init_norm = Gaussian(loc=0.0, scale=0.01)
        # three convolutional stages: (filter shape, stride) per the paper —
        # 32x8x8/4, 64x4x4/2, 64x3x3/1, each followed by a rectifier
        conv_specs = [((8, 8, 32), 4), ((4, 4, 64), 2), ((3, 3, 64), 1)]
        layers = [
            Conv(fshape,
                 strides=stride,
                 init=init_norm,
                 activation=Rectlin(),
                 batch_norm=self.batch_norm)
            for fshape, stride in conv_specs
        ]
        # fully-connected hidden layer of 512 rectifier units
        layers.append(
                Affine(
                    nout=512,
                    init=init_norm,
                    activation=Rectlin(),
                    batch_norm=self.batch_norm))
        # linear output layer: one Q-value per valid action
        layers.append(
                Affine(
                    nout=self.output_shape,
                    init=init_norm))
        return layers

    def _set_optimizer(self):
        """ Initializes the selected optimization algorithm.

        Raises:
            ValueError: if ``self.args.optimizer`` is not one of
                'rmsprop', 'adam' or 'adadelta'.
        """
        _logger.debug("Optimizer = %s" % str(self.args.optimizer))
        if self.args.optimizer == 'rmsprop':
            self.optimizer = RMSProp(
                    learning_rate = self.args.learning_rate,
                    decay_rate = self.args.decay_rate,
                    stochastic_round = self.args.stochastic_round)
        elif self.args.optimizer == 'adam':
            self.optimizer = Adam(
                    learning_rate = self.args.learning_rate,
                    stochastic_round = self.args.stochastic_round)
        elif self.args.optimizer == 'adadelta':
            self.optimizer = Adadelta(
                    decay = self.args.decay_rate,
                    stochastic_round = self.args.stochastic_round)
        else:
            # FIX: was `assert false, "Unknown optimizer"`, which raised
            # NameError ("false" is undefined in Python) and would be
            # stripped under `python -O`; fail with an explicit error.
            raise ValueError("Unknown optimizer: %s" % self.args.optimizer)

    def _prepare_network_input(self, states):
        """ Transforms and normalizes the states from one minibatch.

        Args:
            states (): a set of states with the size of minibatch
        """
        _logger.debug("Normalizing and transforming input")
        # change order of axes to match what Neon expects
        states = np.transpose(states, axes = (1, 2, 3, 0))
        # copy() shouldn't be necessary here, but Neon doesn't work otherwise
        self.input.set(states.copy())
        # normalize network input between 0 and 1
        # (self.grayscales presumably holds the max pixel value, e.g. 255 —
        # confirm in the Learner base class)
        self.be.divide(self.input, self.grayscales, self.input)

    def train(self, minibatch, epoch):
        """ Prepare, perform and document a complete train step for one minibatch.

        Args:
            minibatch (numpy.ndarray): Mini-batch of states, shape=(batch_size,sequence_length,frame_width,frame_height)
            epoch (int): Current train epoch
        """
        _logger.debug("Complete trainig step for one minibatch")
        prestates, actions, rewards, poststates, terminals = minibatch
        # sanity-check shapes: 4-D state batches, 1-D per-transition arrays,
        # all with the same leading (batch) dimension
        assert len(prestates.shape) == 4
        assert len(poststates.shape) == 4
        assert len(actions.shape) == 1
        assert len(rewards.shape) == 1
        assert len(terminals.shape) == 1
        assert prestates.shape == poststates.shape
        assert prestates.shape[0] == actions.shape[0] == rewards.shape[0] == poststates.shape[0] == terminals.shape[0]
        # feed-forward pass for poststates to get Q-values
        self._prepare_network_input(poststates)
        postq = self.target_model.fprop(self.input, inference = True)
        assert postq.shape == (self.output_shape, self.batch_size)
        # calculate max Q-value for each poststate
        maxpostq = self.be.max(postq, axis=0).asnumpyarray()
        assert maxpostq.shape == (1, self.batch_size)
        # average maxpostq for stats
        maxpostq_avg = maxpostq.mean()
        # feed-forward pass for prestates
        self._prepare_network_input(prestates)
        preq = self.model.fprop(self.input, inference = False)
        assert preq.shape == (self.output_shape, self.batch_size)
        # make copy of prestate Q-values as targets
        targets = preq.asnumpyarray()
        # clip rewards between -1 and 1
        rewards = np.clip(rewards, self.min_reward, self.max_reward)
        # update Q-value targets for each state only at actions taken:
        # terminal transitions get the bare reward, others the bootstrapped
        # Bellman target
        for i, action in enumerate(actions):
            if terminals[i]:
                targets[action, i] = float(rewards[i])
            else:
                targets[action, i] = float(rewards[i]) + self.discount_rate * maxpostq[0,i]
        # copy targets to GPU memory
        self.targets.set(targets)
        # calculate errors
        errors = self.cost_func.get_errors(preq, self.targets)
        assert errors.shape == (self.output_shape, self.batch_size)
        # average error where there is a error (should be 1 in every row)
        #TODO: errors_avg = np.sum(errors)/np.size(errors[errors>0.])
        # clip errors
        if self.clip_error:
            self.be.clip(errors, -self.clip_error, self.clip_error, out = errors)
        # calculate cost, just in case
        cost = self.cost_func.get_cost(preq, self.targets)
        assert cost.shape == (1,1)
        # perform back-propagation of gradients
        self.model.bprop(errors)
        # perform optimization
        self.optimizer.optimize(self.model.layers_to_optimize, epoch)
        # increase number of weight updates (needed for target clone interval)
        self.update_iterations += 1
        # periodically clone online weights into the target network
        if self.target_update_frequency and self.update_iterations % self.target_update_frequency == 0:
            self._copy_theta()
            _logger.info("Network update #%d: Cost = %s, Avg Max Q-value = %s" % (self.update_iterations, str(cost.asnumpyarray()[0][0]), str(maxpostq_avg)))
        # update statistics
        if self.callback:
            self.callback.from_learner(cost.asnumpyarray()[0,0], maxpostq_avg)

    def get_Q(self, state):
        """ Runs one forward pass for a single state and returns its Q-values.

        Args:
            state(numpy.ndarray): Single state, shape=(sequence_length,frame_width,frame_height).

        Returns:
            q_values (numpy.ndarray): Q-values of the first mini-batch element,
                shape=(self.output_shape,)
        """
        _logger.debug("State shape = %s" % str(state.shape))
        # Neon fixes the mini-batch size, so the single state is placed in
        # slot 0 of a full-sized dummy batch; the other slots are ignored.
        batch = self.dummy_batch
        batch[0] = state
        assert batch.shape == ((self.batch_size, self.sequence_length,) + self.frame_dims)
        # forward pass in inference mode over the whole dummy batch
        self._prepare_network_input(batch)
        q_tensor = self.model.fprop(self.input, inference = True)
        assert q_tensor.shape == (self.output_shape, self.batch_size)
        q_host = q_tensor.asnumpyarray()
        _logger.debug("Qvalues: %s" % (str(q_host[:, 0])))
        # column 0 corresponds to the state we actually asked about
        return q_host[:, 0]

    def _copy_theta(self):
        """ Synchronizes the target network with the online network.

        Weights and layer states are both serialized (keep_states=True /
        load_states=True), so stateful layers stay consistent.
        """
        _logger.debug("Copying weights")
        snapshot = self.model.get_description(get_weights=True, keep_states=True)
        self.target_model.deserialize(snapshot, load_states=True)

    def save_weights(self, target_dir, epoch):
        """ Saves the current network parameters to disk.

        The file name encodes game, network type, optimizer and the 1-based
        epoch number: "<game>_<net_type>_<optimizer>_<epoch+1>.prm".

        Args:
            target_dir (str): Directory where the network parameters are stored for each episode.
            epoch (int): Current epoch.
        """
        filename = "{0}_{1}_{2}_{3:d}.prm".format(
            str(self.args.game.lower()),
            str(self.args.net_type.lower()),
            str(self.args.optimizer.lower()),
            epoch + 1)
        self.model.save_params(os.path.join(target_dir, filename))

    def load_weights(self, source_file):
        """ Loads the network parameters from a given file.

        Note: only the online model is restored here; the target network is
        not synchronized until the next _copy_theta() call.

        Args:
            source_file (str): Complete path to a file with network parameters.
        """
        self.model.load_params(source_file)
示例#10
0
# separate momentum optimizers for bias terms; the class-layer biases get a
# 10x higher learning rate
opt_bias = GradientDescentMomentum(0.002, 0.9)
opt_bias_class = GradientDescentMomentum(0.02, 0.9)

# set up the mapping of layers to optimizers
# (presumably keyed by layer name, with 'default' as the fallback — confirm
# against MultiOptimizer's matching rules)
opt = MultiOptimizer({'default': opt_vgg, 'Bias': opt_bias,
     'class_layer': opt_class_layer, 'class_layer_bias': opt_bias_class})

# use cross-entropy cost to train the network
cost = GeneralizedCost(costfunc=CrossEntropyMulti())

lunaModel = Model(layers=vgg_layers)

# optionally warm-start from a previously saved checkpoint
if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    lunaModel.load_params(args.model_file)

# configure callbacks (validation set evaluated with misclassification metric)
#callbacks = Callbacks(lunaModel, eval_set=valid_set, **args.callback_args)
callbacks = Callbacks(lunaModel, eval_set=valid_set, metric=Misclassification(), **args.callback_args)

if args.deconv:
    callbacks.add_deconv_callback(train_set, valid_set)

# train, then persist the final parameters
lunaModel.fit(train_set, optimizer=opt, num_epochs=num_epochs,
        cost=cost, callbacks=callbacks)

lunaModel.save_params('LUNA16_VGG_model.prm')

neon_logger.display('Finished training. Calculating error on the validation set...')
neon_logger.display('Misclassification error (validation) = {:.2f}%'.format(lunaModel.eval(valid_set, metric=Misclassification())[0] * 100))
示例#11
0
# network topology — it must match the model produced by my_cifar_train.py,
# otherwise load_params below cannot restore the weights
from neon.layers import Conv, Affine, Pooling
from neon.initializers import Uniform
from neon.transforms.activation import Rectlin, Softmax
init_uni = Uniform(low=-0.1, high=0.1)
layers = [Conv(fshape=(5,5,16), init=init_uni, activation=Rectlin()),
          Pooling(fshape=2, strides=2),
          Conv(fshape=(5,5,32), init=init_uni, activation=Rectlin()),
          Pooling(fshape=2, strides=2),
          Affine(nout=500, init=init_uni, activation=Rectlin()),
          Affine(nout=10, init=init_uni, activation=Softmax())]

print("Before running this script, run my_cifar_train.py to train a CIFAR10 model")
print("Loading pre-trained CIFAR10 model")
from neon.models import Model
model = Model(layers)
# weights only; optimizer/layer states are not restored
model.load_params("cifar10_model.prm", load_states=False)

# CIFAR10 label names, index-aligned with the 10 softmax outputs
classes =["airplane", "automobile", "bird", "cat", "deer",
          "dog", "frog", "horse", "ship", "truck"]
nclass = len(classes)


# Sanity check 1
# an image of a frog from wikipedia
# image_source = "https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Atelopus_zeteki1.jpg/440px-Atelopus_zeteki1.jpg"
# import urllib
# urllib.urlretrieve(image_source, filename="image.jpg")

# crop and resize to 32x32
from PIL import Image
import numpy as np
示例#12
0
# Now construct the network
from neon.layers import ColorNoise
from neon.util.persist import save_obj  # hoisted out of the loop below
#layers = [ColorNoise()]

# layers.append(Affine(nout=100, init=Kaiming(local=False), batch_norm=True, activation=Softmax()))

# evaluate the pretrained network at several input scales and dump the
# softmax outputs per scale for later analysis/ensembling
scales = [112, 128, 160, 240]

for scale in scales:
    # print() is valid under Python 2 and 3 for a single argument; the
    # original Python 2 `print scale` statement is a SyntaxError on Python 3
    print(scale)

    # assemble the all-convolutional classifier on top of the modules
    # produced by module_factory()
    layers = []
    layers += [Conv(**conv_params(7, 32, 2))]
    for nfm, stride in zip(nfms, strides):
        layers.append(module_factory(nfm, stride))
    layers.append(Pooling(7, op='avg'))

    # 1x1 conv to 100 classes, global average pooling, softmax
    layers.append(Conv(fshape=(1,1,100), init=Kaiming(local=True), batch_norm=True))
    layers.append(Pooling(fshape='all', op='avg'))
    layers.append(Activation(Softmax()))

    model = Model(layers=layers)
    # validation loader resized to the current scale; no augmentation
    test = ImageLoader(set_name='validation', shuffle=False, do_transforms=False, inner_size=scale,
                       scale_range=scale, repo_dir=args.data_dir)

    model.load_params("/home/users/hunter/bigfeat_dropout.pkl")

    softmaxes = model.get_outputs(test)
    save_obj(softmaxes, "bigfeat_dropout_SM_{}.pkl".format(scale))
示例#13
0
    Conv((3, 3, 96), **convp1),
    Conv((3, 3, 96), **convp1s2),
    Dropout(keep=.5),
    Conv((3, 3, 192), **convp1),
    Conv((3, 3, 192), **convp1),
    Conv((3, 3, 192), **convp1s2),
    Dropout(keep=.5),
    Conv((3, 3, 192), **convp1),
    Conv((1, 1, 192), **conv),
    Conv((1, 1, 16), **conv),
    Pooling(8, op="avg"),
    Activation(Softmax())
]

mlp = Model(layers=layers)
# NOTE(review): load_params is given a '~' path; confirm neon expands the
# user home directory, otherwise this needs os.path.expanduser()
mlp.load_params("~/neon/examples/cifar10_allcnn_e350.p")

# pull the serialized layer configs (with weights) for downstream conversion
model_description = mlp.get_description(get_weights=True)
layers = model_description["model"]["config"]["layers"]


def ignore_dropout(layers, input_dims):
    """Return how many leading layers to skip: 1 if layers[0] is a Dropout
    layer (which carries no weights), else 0. input_dims is unused here but
    kept for signature parity with the other layer handlers."""
    first = layers[0]
    if first["type"] != "neon.layers.layer.Dropout":
        return 0
    logger.debug("\tIgnoring dropout layer:%s",
                 first["config"]["name"])
    return 1


def convolution_neuron_layer(layers, input_dims):
示例#14
0
# evaluate the pretrained network at several input scales and dump the
# softmax outputs per scale for later analysis/ensembling
from neon.util.persist import save_obj  # hoisted out of the loop below

scales = [112, 128, 160, 240]

for scale in scales:
    # print() is valid under Python 2 and 3 for a single argument; the
    # original Python 2 `print scale` statement is a SyntaxError on Python 3
    print(scale)

    # assemble the all-convolutional classifier on top of the modules
    # produced by module_factory()
    layers = []
    layers += [Conv(**conv_params(7, 32, 2))]
    for nfm, stride in zip(nfms, strides):
        layers.append(module_factory(nfm, stride))
    layers.append(Pooling(7, op='avg'))

    # 1x1 conv to 100 classes, global average pooling, softmax
    layers.append(
        Conv(fshape=(1, 1, 100), init=Kaiming(local=True), batch_norm=True))
    layers.append(Pooling(fshape='all', op='avg'))
    layers.append(Activation(Softmax()))

    model = Model(layers=layers)
    # validation loader resized to the current scale; no augmentation
    test = ImageLoader(set_name='validation',
                       shuffle=False,
                       do_transforms=False,
                       inner_size=scale,
                       scale_range=scale,
                       repo_dir=args.data_dir)

    model.load_params("/home/users/hunter/bigfeat_dropout.pkl")

    softmaxes = model.get_outputs(test)
    save_obj(softmaxes, "bigfeat_dropout_SM_{}.pkl".format(scale))
示例#15
0
    """
    prob = prob / (prob.sum() + 1e-6)
    return np.argmax(np.random.multinomial(1, prob, 1))


# Set batch size and time_steps to 1 for generation and reset buffers
model.be.bsz = 1
time_steps = 1
num_predict = 1000

# rebuild the network with generation-time (bsz=1) buffer shapes and load
# the trained weights into it
layers = [
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model_new = Model(layers=layers)
model_new.load_params(args.save_path)
model_new.initialize(dataset=(train_set.shape[0], time_steps))

# Generate text
text = []
seed_tokens = list('ROMEO:')

# one-hot input column: vocab-size rows by time_steps (=1) columns
x = model_new.be.zeros((len(train_set.vocab), time_steps))

# prime the recurrent state by feeding the seed characters one at a time;
# `y` after this loop is the prediction following the last seed character
for s in seed_tokens:
    x.fill(0)
    x[train_set.token_to_index[s], 0] = 1
    y = model_new.fprop(x)

for i in range(num_predict):
    # Take last prediction and feed into next fprop
示例#16
0
class DeepQNetwork:
    """ Deep Q-Network with separate online and target models (neon GPU backend).

    The online model is trained in train(); the target model supplies the
    bootstrap Q-values and is refreshed explicitly via update_target_network().
    """

    def __init__(self,
                 num_actions,
                 batch_size=32,
                 discount_rate=0.99,
                 history_length=4,
                 cols=64,
                 rows=64,
                 clip_error=1,
                 min_reward=-1,
                 max_reward=1,
                 batch_norm=False):
        """
        Args:
            num_actions (int): Size of the action space (network output dim).
            batch_size (int): Mini-batch size; also fixes the backend batch size.
            discount_rate (float): Bellman discount factor gamma.
            history_length (int): Number of stacked frames per state.
            cols (int): Board/frame width dimension.
            rows (int): Board/frame height dimension.
            clip_error (float): Symmetric clipping bound for the TD-error
                deltas; falsy value disables clipping.
            min_reward (float): Lower bound for reward clipping.
            max_reward (float): Upper bound for reward clipping.
            batch_norm (bool): Whether conv/affine layers use batch norm.
        """
        self.num_actions = num_actions
        self.batch_size = batch_size
        self.discount_rate = discount_rate
        self.history_length = history_length
        self.board_dim = (cols, rows)
        self.clip_error = clip_error
        self.min_reward = min_reward
        self.max_reward = max_reward
        self.batch_norm = batch_norm

        self.be = gen_backend(backend='gpu',
                              batch_size=self.batch_size,
                              datatype=np.dtype('float32').type)

        # neon layout: feature dims first, batch dim last
        self.input_shape = (self.history_length, ) + self.board_dim + (
            self.batch_size, )
        self.input = self.be.empty(self.input_shape)
        self.input.lshape = self.input_shape  # hack from simple_dqn "needed for convolutional networks"
        self.targets = self.be.empty((self.num_actions, self.batch_size))

        layers = self._createLayers(self.num_actions)
        self.model = Model(layers=layers)
        self.cost = GeneralizedCost(costfunc=SumSquared())
        # for l in self.model.layers.layers:
        # 	l.parallelism = 'Disabled'
        self.model.initialize(self.input_shape[:-1], cost=self.cost)
        self.optimizer = RMSProp(learning_rate=0.002,
                                 decay_rate=0.95,
                                 stochastic_round=True)

        self.train_iterations = 0
        # target network mirrors the online architecture; weights are copied
        # over via update_target_network()
        self.target_model = Model(layers=self._createLayers(num_actions))
        # for l in self.target_model.layers.layers:
        # 	l.parallelism = 'Disabled'
        self.target_model.initialize(self.input_shape[:-1])

        # optional stats hook; may remain None (train() skips it then)
        self.callback = None

    def _createLayers(self, num_actions):
        """ Builds the conv/affine stack; the final Affine emits one linear
        output per action (no activation). """
        init_xavier_conv = Xavier(local=True)
        init_xavier_affine = Xavier(local=False)
        layers = []
        layers.append(
            Conv((8, 8, 32),
                 strides=4,
                 init=init_xavier_conv,
                 activation=Rectlin(),
                 batch_norm=self.batch_norm))
        layers.append(
            Conv((4, 4, 64),
                 strides=2,
                 init=init_xavier_conv,
                 activation=Rectlin(),
                 batch_norm=self.batch_norm))
        layers.append(
            Conv((2, 2, 128),
                 strides=1,
                 init=init_xavier_conv,
                 activation=Rectlin(),
                 batch_norm=self.batch_norm))
        layers.append(
            Affine(nout=256,
                   init=init_xavier_affine,
                   activation=Rectlin(),
                   batch_norm=self.batch_norm))
        layers.append(Affine(nout=num_actions, init=init_xavier_affine))
        return layers

    def _setInput(self, states):
        """ Copies states into the network input buffer (batch dim moved last)
        and rescales values via (x + 1) / 2 — assumes inputs in [-1, 1],
        mapped to [0, 1]. TODO confirm input range against the caller. """
        states = np.transpose(states, axes=(1, 2, 3, 0))
        self.input.set(states.copy())
        self.be.add(self.input, 1, self.input)
        self.be.divide(self.input, 2, self.input)

    def update_target_network(self):
        """ Copies online weights (and states, e.g. for batch norm) into the
        target model. """
        pdict = self.model.get_description(get_weights=True, keep_states=True)
        self.target_model.deserialize(pdict, load_states=True)

    def train(self, minibatch, epoch):
        """ One DQN update step on a minibatch of transitions.

        Targets equal the online network's own predictions except at the
        taken actions, where they become r (terminal) or
        r + gamma * max_a' Q_target(s', a').
        """
        prestates, actions, rewards, poststates, terminals = minibatch

        self._setInput(poststates)
        postq = self.target_model.fprop(self.input, inference=True)
        assert postq.shape == (self.num_actions, self.batch_size)

        maxpostq = self.be.max(postq, axis=0).asnumpyarray()
        assert maxpostq.shape == (1, self.batch_size)

        self._setInput(prestates)
        preq = self.model.fprop(self.input, inference=False)
        assert preq.shape == (self.num_actions, self.batch_size)

        targets = preq.asnumpyarray().copy()
        # honor the configured reward clipping range instead of the previous
        # hard-coded [-1, 1] (identical behavior with the default arguments)
        rewards = np.clip(rewards, self.min_reward, self.max_reward)

        for i, action in enumerate(actions):
            if terminals[i]:
                targets[action, i] = float(rewards[i])
            else:
                targets[action, i] = float(
                    rewards[i]) + self.discount_rate * maxpostq[0, i]

        self.targets.set(targets)

        deltas = self.cost.get_errors(preq, self.targets)
        assert deltas.shape == (self.num_actions, self.batch_size)

        cost = self.cost.get_cost(preq, self.targets)
        assert cost.shape == (1, 1)

        if self.clip_error:
            self.be.clip(deltas, -self.clip_error, self.clip_error, out=deltas)

        self.model.bprop(deltas)
        self.optimizer.optimize(self.model.layers_to_optimize, epoch)

        self.train_iterations += 1
        # callback is optional (initialized to None in __init__); guard to
        # avoid an AttributeError when no callback was registered
        if self.callback:
            self.callback.on_train(cost[0, 0])

    def predict(self, states):
        """ Forward pass in inference mode.

        Returns:
            numpy.ndarray: Q-values transposed to shape (batch_size, num_actions).
        """
        assert states.shape == ((
            self.batch_size,
            self.history_length,
        ) + self.board_dim)

        self._setInput(states)
        qvalues = self.model.fprop(self.input, inference=True)
        assert qvalues.shape == (self.num_actions, self.batch_size)

        return qvalues.T.asnumpyarray()

    def load_weights(self, load_path):
        """ Restores the online model parameters from disk (target model is
        not synchronized here). """
        self.model.load_params(load_path)

    def save_weights(self, save_path):
        """ Saves the online model parameters to disk. """
        self.model.save_params(save_path)
示例#17
0
# network topology — it must match the model produced by my_cifar_train.py,
# otherwise load_params below cannot restore the weights
layers = [
    Conv(fshape=(5, 5, 16), init=init_uni, activation=Rectlin()),
    Pooling(fshape=2, strides=2),
    Conv(fshape=(5, 5, 32), init=init_uni, activation=Rectlin()),
    Pooling(fshape=2, strides=2),
    Affine(nout=500, init=init_uni, activation=Rectlin()),
    Affine(nout=10, init=init_uni, activation=Softmax())
]

print(
    "Before running this script, run my_cifar_train.py to train a CIFAR10 model"
)
print("Loading pre-trained CIFAR10 model")
from neon.models import Model
model = Model(layers)
# weights only; optimizer/layer states are not restored
model.load_params("cifar10_model.prm", load_states=False)

# CIFAR10 label names, index-aligned with the 10 softmax outputs
classes = [
    "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse",
    "ship", "truck"
]
nclass = len(classes)

# Sanity check 1
# an image of a frog from wikipedia
# image_source = "https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Atelopus_zeteki1.jpg/440px-Atelopus_zeteki1.jpg"
# import urllib
# urllib.urlretrieve(image_source, filename="image.jpg")

# crop and resize to 32x32
from PIL import Image
示例#18
0
    Conv((3, 3, nfilters[1]), bias=Constant(0.1), **convp1),
    Conv((3, 3, nfilters[1]), bias=Constant(0.1), **convp1),
    Pooling(3, strides=2, padding=1),  # 16 -> 8
    Dropout(keep=0.8),
    Conv((3, 3, nfilters[2]), bias=Constant(0.1), **convp1),
    Conv((3, 3, nfilters[2]), bias=Constant(0.1), **convp1),
    Conv((3, 3, nfilters[2]), bias=Constant(0.1), **convp1),
    Pooling(3, strides=2, padding=1),  # 8 -> 4
    Dropout(keep=0.7),
    Affine(nout=10,
           bias=Constant(0.1),
           activation=Softmax(),
           init=Gaussian(scale=0.01))
]
model = Model(layers=layers)
# warm start from pretrained CIFAR10 VGG weights (weights only, no states)
model.load_params('models/cifar10/cifar10vgg.pkl', load_states=False)

# define optimizer
# weight updates use weight decay; bias updates ('Bias' group) do not
opt_w = GradientDescentMomentum(learning_rate=0.01,
                                momentum_coef=0.9,
                                wdecay=0.0005)
opt_b = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)
opt = MultiOptimizer({'default': opt_w, 'Bias': opt_b}, name='multiopt')

# configure callbacks (misclassification on the validation set)
callbacks = Callbacks(model,
                      eval_set=valid_set,
                      metric=Misclassification(),
                      **args.callback_args)
callbacks.add_callback(
    TrainByStageCallback(model,
示例#19
0
class WordseqRegressor():
    """ Regresses a scalar rating from a word-id sequence with a BiLSTM.

    NOTE(review): this class uses Python 2 syntax (print statements,
    name-mangled Thread internals) and will not run under Python 3 as-is.
    """
    def __init__(self, pickle_model="", datadir=None):
        """ Builds the network and word extractor; loads a pickled model when
        no training data directory is given, otherwise trains from datadir.

        Args:
            pickle_model (str): Path of the saved model (and its '.wb'
                wordbatch companion file) to load or to save after training.
            datadir (str): Directory of JSON review files; None means
                "load pretrained model" instead of training.
        """
        self.maxlen = 100
        self.n_words = 100000
        parser = NeonArgparser(__doc__)
        self.args = parser.parse_args()
        self.args.batch_size = self.batch_size = 2048  #
        self.args.deterministic = None
        self.args.rng_seed = 0
        print extract_valid_args(self.args, gen_backend)
        self.be = gen_backend(**extract_valid_args(self.args, gen_backend))

        # small uniform init for the embedding table, Glorot elsewhere
        embedding_dim = 100
        init_emb = Uniform(-0.1 / embedding_dim, 0.1 / embedding_dim)
        init_glorot = GlorotUniform()
        # embedding -> dropout -> BiLSTM -> mean over time -> linear output
        self.layers = [
            LookupTable(vocab_size=self.n_words,
                        embedding_dim=embedding_dim,
                        init=init_emb,
                        pad_idx=0,
                        update=True,
                        name="LookupTable"),
            Dropout(keep=0.5),
            BiLSTM(100,
                   init=init_glorot,
                   activation=Tanh(),
                   gate_activation=Logistic(),
                   reset_cells=True,
                   split_inputs=False,
                   name="BiLSTM"),
            RecurrentMean(),
            Affine(1,
                   init_glorot,
                   bias=init_glorot,
                   activation=Identity(),
                   name="Affine")
        ]

        # text -> fixed-length (seq_maxlen) word-id sequences
        self.wordbatch = wordbatch.WordBatch(normalize_text,
                                             n_words=self.n_words,
                                             extractors=[(wordbatch.WordSeq, {
                                                 "seq_maxlen":
                                                 self.maxlen
                                             })])

        # NOTE(review): prefer "datadir is None" over "== None"
        if datadir == None:
            self.model = Model(self.layers)
            self.model.load_params(pickle_model)
            self.wordbatch = pkl.load(gzip.open(pickle_model + ".wb", 'rb'))
        else:
            self.train(datadir, pickle_model)

    def remove_unks(self, x):
        """ Maps out-of-vocabulary ids (>= n_words) to the shared id n_words. """
        return [[self.n_words if w >= self.n_words else w for w in sen]
                for sen in x]

    def format_texts(self, texts):
        """ Transforms raw texts into word-id sequences with OOV ids folded. """
        return self.remove_unks(self.wordbatch.transform(texts))

    class ThreadWithReturnValue(Thread):
        """ Thread whose join() returns the target's return value.

        NOTE(review): relies on Python 2 name-mangled Thread internals
        (_Thread__target etc.); also note the mutable default kwargs={}
        shared across calls.
        """
        def __init__(self,
                     group=None,
                     target=None,
                     name=None,
                     args=(),
                     kwargs={},
                     Verbose=None):
            Thread.__init__(self, group, target, name, args, kwargs, Verbose)
            self._return = None

        def run(self):
            # capture the target's return value so join() can hand it back
            if self._Thread__target is not None:
                self._return = self._Thread__target(*self._Thread__args,
                                                    **self._Thread__kwargs)

        def join(self):
            Thread.join(self)
            return self._return

    def train(self, datadir, pickle_model=""):
        """ Streams JSON review files, extracts (text, rating) pairs, and fits
        the network; optionally persists the model and wordbatch state.

        Keeps every 8th review; labels are Overall ratings 1..5 rescaled to
        [-1, 1] via (rating - 3) * 0.5. Text transformation runs in a
        background thread per 100k-review batch to overlap with parsing.
        """
        texts = []
        labels = []
        training_data = os.listdir(datadir)
        rcount = 0
        texts2 = []
        batchsize = 100000

        t = None
        for jsonfile in training_data:
            with open(datadir + "/" + jsonfile, u'r') as inputfile:
                for line in inputfile:
                    #if rcount > 1000000: break
                    # NOTE(review): bare except silently drops malformed
                    # lines — consider catching ValueError explicitly
                    try:
                        line = json.loads(line.strip())
                    except:
                        continue
                    for review in line["Reviews"]:
                        rcount += 1
                        if rcount % 100000 == 0: print rcount
                        # subsample: keep only every 8th review
                        if rcount % 8 != 0: continue
                        if "Overall" not in review["Ratings"]: continue
                        texts.append(review["Content"])
                        labels.append(
                            (float(review["Ratings"]["Overall"]) - 3) * 0.5)
                        if len(texts) % batchsize == 0:
                            if t != None: texts2.append(t.join())
                            t = self.ThreadWithReturnValue(
                                target=self.wordbatch.transform,
                                args=(texts, ))
                            t.start()
                            texts = []
        # NOTE(review): if fewer than batchsize reviews were kept, t is still
        # None here and t.join() raises AttributeError — confirm intent
        texts2.append(t.join())
        texts2.append(self.wordbatch.transform(texts))
        del (texts)
        texts = sp.vstack(texts2)

        # no new words are added to the dictionary after training data is read
        self.wordbatch.dictionary_freeze = True

        train = [
            np.asarray(texts, dtype='int32'),
            np.asanyarray(labels, dtype='float32')
        ]
        train[1].shape = (train[1].shape[0], 1)

        num_epochs = 10
        cost = GeneralizedCost(costfunc=SumSquared())
        self.model = Model(layers=self.layers)
        optimizer = Adam(learning_rate=0.01)

        # shuffle samples and labels with the same permutation
        index_shuf = list(range(len(train[0])))
        random.shuffle(index_shuf)
        train[0] = np.asarray([train[0][x] for x in index_shuf], dtype='int32')
        train[1] = np.asarray([train[1][x] for x in index_shuf],
                              dtype='float32')
        train_iter = ArrayIterator(train[0],
                                   train[1],
                                   nclass=1,
                                   make_onehot=False)
        self.model.fit(train_iter,
                       optimizer=optimizer,
                       num_epochs=num_epochs,
                       cost=cost,
                       callbacks=Callbacks(self.model,
                                           **self.args.callback_args))

        if pickle_model != "":
            self.model.save_params(pickle_model)
            with gzip.open(pickle_model + ".wb", 'wb') as model_file:
                pkl.dump(self.wordbatch, model_file, protocol=2)

    def predict_batch(self, texts):
        """ Predicts one scalar per input text.

        NOTE(review): `texts.shape` implies a numpy array is expected, and
        the local name `input` shadows the builtin.
        """
        input = np.array(self.format_texts(texts))
        output = np.zeros((texts.shape[0], 1))
        test = ArrayIterator(input, output, nclass=1, make_onehot=False)
        results = [row[0] for row in self.model.get_outputs(test)]
        return results
示例#20
0
class ModelRunnerNeon():
    """ DQN trainer on the neon GPU backend with optional double-DQN and
    prioritized-replay weighting.

    Maintains an online network (train_net) and a target network (target_net);
    the latter is refreshed explicitly via update_model().
    """
    def __init__(self, args, max_action_no, batch_dimension):
        """
        Args:
            args: Parsed configuration (batch size, discount, optimizer, ...).
            max_action_no (int): Size of the action space (network output dim).
            batch_dimension (tuple): Replay batch shape; presumably
                (batch, history, height, width) — confirm against the caller.
        """
        self.args = args
        self.train_batch_size = args.train_batch_size
        self.discount_factor = args.discount_factor
        self.use_gpu_replay_mem = args.use_gpu_replay_mem

        self.be = gen_backend(backend='gpu', batch_size=self.train_batch_size)

        # neon layout: feature dims first, batch dim last
        self.input_shape = (batch_dimension[1], batch_dimension[2],
                            batch_dimension[3], batch_dimension[0])
        self.input = self.be.empty(self.input_shape)
        self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
        self.targets = self.be.empty((max_action_no, self.train_batch_size))

        # replay history either lives on the GPU (uint8, converted on use)
        # or on the host as float32
        if self.use_gpu_replay_mem:
            self.history_buffer = self.be.zeros(batch_dimension,
                                                dtype=np.uint8)
            self.input_uint8 = self.be.empty(self.input_shape, dtype=np.uint8)
        else:
            self.history_buffer = np.zeros(batch_dimension, dtype=np.float32)

        self.train_net = Model(self.create_layers(max_action_no))
        self.cost = GeneralizedCost(costfunc=SumSquared())
        # Bug fix
        for l in self.train_net.layers.layers:
            l.parallelism = 'Disabled'
        self.train_net.initialize(self.input_shape[:-1], self.cost)

        self.target_net = Model(self.create_layers(max_action_no))
        # Bug fix
        for l in self.target_net.layers.layers:
            l.parallelism = 'Disabled'
        self.target_net.initialize(self.input_shape[:-1])

        if self.args.optimizer == 'Adam':  # Adam
            self.optimizer = Adam(beta_1=args.rms_decay,
                                  beta_2=args.rms_decay,
                                  learning_rate=args.learning_rate)
        else:  # Neon RMSProp
            self.optimizer = RMSProp(decay_rate=args.rms_decay,
                                     learning_rate=args.learning_rate)

        self.max_action_no = max_action_no
        self.running = True

    def get_initializer(self, input_size):
        """ Chooses the weight initializer per config: 'xavier', 'fan_in'
        (uniform +-1/sqrt(input_size)), or Gaussian(0, 0.01) otherwise. """
        dnnInit = self.args.dnn_initializer
        if dnnInit == 'xavier':
            initializer = Xavier()
        elif dnnInit == 'fan_in':
            std_dev = 1.0 / math.sqrt(input_size)
            initializer = Uniform(low=-std_dev, high=std_dev)
        else:
            initializer = Gaussian(0, 0.01)
        return initializer

    def create_layers(self, max_action_no):
        """ Builds the DQN conv/affine stack; the final Affine emits one
        linear output per action. """
        layers = []

        initializer = self.get_initializer(input_size=4 * 8 * 8)
        layers.append(
            Conv(fshape=(8, 8, 32),
                 strides=4,
                 init=initializer,
                 bias=initializer,
                 activation=Rectlin()))

        initializer = self.get_initializer(input_size=32 * 4 * 4)
        layers.append(
            Conv(fshape=(4, 4, 64),
                 strides=2,
                 init=initializer,
                 bias=initializer,
                 activation=Rectlin()))

        initializer = self.get_initializer(input_size=64 * 3 * 3)
        layers.append(
            Conv(fshape=(3, 3, 64),
                 strides=1,
                 init=initializer,
                 bias=initializer,
                 activation=Rectlin()))

        initializer = self.get_initializer(input_size=7 * 7 * 64)
        layers.append(
            Affine(nout=512,
                   init=initializer,
                   bias=initializer,
                   activation=Rectlin()))

        initializer = self.get_initializer(input_size=512)
        layers.append(
            Affine(nout=max_action_no, init=initializer, bias=initializer))

        return layers

    def clip_reward(self, reward):
        """ Clamps a reward into [clip_reward_low, clip_reward_high]. """
        if reward > self.args.clip_reward_high:
            return self.args.clip_reward_high
        elif reward < self.args.clip_reward_low:
            return self.args.clip_reward_low
        else:
            return reward

    def set_input(self, data):
        """ Copies a state batch into the network input buffer (batch dim
        moved last) and normalizes pixel values from [0, 255] to [0, 1]. """
        if self.use_gpu_replay_mem:
            self.be.copy_transpose(data, self.input_uint8, axes=(1, 2, 3, 0))
            self.input[:] = self.input_uint8 / 255
        else:
            self.input.set(data.transpose(1, 2, 3, 0).copy())
            self.be.divide(self.input, 255, self.input)

    def predict(self, history_buffer):
        """ Returns the online network's Q-values for the first batch element. """
        self.set_input(history_buffer)
        output = self.train_net.fprop(self.input, inference=True)
        return output.T.asnumpyarray()[0]

    def print_weights(self):
        pass

    def train(self, minibatch, replay_memory, learning_rate, debug):
        """ One DQN update step.

        Supports double DQN (action selection by train_net, evaluation by
        target_net) and prioritized replay (importance weighting of deltas
        plus TD-error feedback into replay_memory). Note: `learning_rate`
        is accepted but unused — the optimizer's configured rate applies.
        """
        if self.args.prioritized_replay == True:
            prestates, actions, rewards, poststates, terminals, replay_indexes, heap_indexes, weights = minibatch
        else:
            prestates, actions, rewards, poststates, terminals = minibatch

        # Get Q*(s, a) with targetNet
        self.set_input(poststates)
        post_qvalue = self.target_net.fprop(self.input,
                                            inference=True).T.asnumpyarray()

        if self.args.double_dqn == True:
            # Get Q*(s, a) with trainNet
            post_qvalue2 = self.train_net.fprop(
                self.input, inference=True).T.asnumpyarray()

        # Get Q(s, a) with trainNet
        self.set_input(prestates)
        pre_qvalue = self.train_net.fprop(self.input, inference=False)

        # targets start as the network's own predictions, then the entries at
        # the taken actions are overwritten with the Bellman targets
        label = pre_qvalue.asnumpyarray().copy()
        for i in range(0, self.train_batch_size):
            if self.args.clip_reward:
                reward = self.clip_reward(rewards[i])
            else:
                reward = rewards[i]
            if terminals[i]:
                label[actions[i], i] = reward
            else:
                if self.args.double_dqn == True:
                    # double DQN: argmax from train_net, value from target_net
                    max_index = np.argmax(post_qvalue2[i])
                    label[actions[i],
                          i] = reward + self.discount_factor * post_qvalue[i][
                              max_index]
                else:
                    label[actions[i],
                          i] = reward + self.discount_factor * np.max(
                              post_qvalue[i])

        # copy targets to GPU memory
        self.targets.set(label)

        delta = self.cost.get_errors(pre_qvalue, self.targets)

        if self.args.prioritized_replay == True:
            # feed TD errors back into the replay memory and apply the
            # importance-sampling weights to the gradients
            delta_value = delta.asnumpyarray()
            for i in range(self.train_batch_size):
                # NOTE(review): Python 2 print statement (py3-incompatible)
                if debug:
                    print 'weight[%s]: %.5f, delta: %.5f, newDelta: %.5f' % (
                        i, weights[i], delta_value[actions[i], i],
                        weights[i] * delta_value[actions[i], i])
                replay_memory.update_td(heap_indexes[i],
                                        abs(delta_value[actions[i], i]))
                delta_value[actions[i],
                            i] = weights[i] * delta_value[actions[i], i]
            delta.set(delta_value.copy())

        if self.args.clip_loss:
            self.be.clip(delta, -1.0, 1.0, out=delta)

        self.train_net.bprop(delta)
        self.optimizer.optimize(self.train_net.layers_to_optimize, epoch=0)

    def update_model(self):
        # have to serialize also states for batch normalization to work
        pdict = self.train_net.get_description(get_weights=True,
                                               keep_states=True)
        self.target_net.deserialize(pdict, load_states=True)
        #print ('Updated target model')

    def finish_train(self):
        """ Signals the training loop to stop. """
        self.running = False

    def load(self, file_name):
        """ Restores online weights from disk and syncs the target network. """
        self.train_net.load_params(file_name)
        self.update_model()

    def save(self, file_name):
        """ Saves the online network parameters to disk. """
        self.train_net.save_params(file_name)
示例#21
0
def test_model_serialize(backend_default, data):
    """Round-trip a multistream MLP through save_params/load_params and
    verify that outputs, layer states, and parameters are unchanged.

    Args:
        backend_default: pytest fixture providing the neon backend.
        data: pytest fixture with the path to the MNIST data.
    """
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=data)

    train_set = ArrayIterator(
        [X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))

    init_norm = Gaussian(loc=0.0, scale=0.01)

    # initialize model
    path1 = Sequential([Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Pooling(2),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    path2 = Sequential([Affine(nout=100, init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Dropout(keep=0.5),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    layers = [MergeMultistream(layers=[path1, path2], merge="stack"),
              Affine(nout=20, init=init_norm, batch_norm=True, activation=Rectlin()),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]

    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    mlp.initialize(train_set, cost=mlp.cost)
    n_test = 3
    num_epochs = 3
    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break

    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [layer.get_params_serialize() for layer in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Serialize model
    mlp.save_params(tmp_save, keep_states=True)

    try:
        # Load model into a fresh Model built from the same layer objects
        mlp = Model(layers=layers)
        mlp.load_params(tmp_save)

        outputs = []
        pdicts = [layer.get_params_serialize() for layer in mlp.layers_to_optimize]
        for i, (x, t) in enumerate(train_set):
            outputs.append(mlp.fprop(x, inference=True))
            if i > n_test:
                break

        # Check outputs, states, and params are the same
        for output, output_exp in zip(outputs, outputs_exp):
            assert np.allclose(output.get(), output_exp.get())

        for pd, pd_exp in zip(pdicts, pdicts_exp):
            for s, s_e in zip(pd['states'], pd_exp['states']):
                if isinstance(s, list):  # this is the batch norm case
                    for _s, _s_e in zip(s, s_e):
                        assert np.allclose(_s, _s_e)
                else:
                    assert np.allclose(s, s_e)
            for p, p_e in zip(pd['params'], pd_exp['params']):
                assert type(p) == type(p_e)
                if isinstance(p, list):  # this is the batch norm case
                    for _p, _p_e in zip(p, p_e):
                        assert np.allclose(_p, _p_e)
                elif isinstance(p, np.ndarray):
                    assert np.allclose(p, p_e)
                else:
                    assert p == p_e
    finally:
        # Remove the temp file even when an assertion above fails,
        # so a failed run does not leave stale state for the next one.
        os.remove(tmp_save)
示例#22
0
    MergeMultistream(layers=[image_path, sent_path], merge="recurrent"),
    Dropout(keep=0.5),
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=True),
    Affine(train_set.vocab_size, init, bias=init2, activation=Softmax())
]

# Masked cross-entropy so padded caption positions do not contribute to the loss.
cost = GeneralizedCostMask(costfunc=CrossEntropyMulti(usebits=True))

# configure callbacks
# Default checkpoint location, used only when none was given on the command line.
checkpoint_model_path = "~/image_caption2.pkl"
if args.callback_args['save_path'] is None:
    args.callback_args['save_path'] = checkpoint_model_path

# Checkpoint every epoch unless the user chose a different interval.
if args.callback_args['serialize'] is None:
    args.callback_args['serialize'] = 1

model = Model(layers=layers)

callbacks = Callbacks(model, **args.callback_args)

opt = RMSProp(decay_rate=0.997, learning_rate=0.0005, epsilon=1e-8, gradient_clip_value=1)

# train model
model.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

# load model (if exited) and evaluate bleu score on test set
if os.path.exists(args.callback_args['save_path']):
    model.load_params(args.callback_args['save_path'])
sents, targets = test_set.predict(model)
test_set.bleu_score(sents, targets)
示例#23
0
# Small CNN: three conv (+pool/dropout) stages followed by two affine layers.
# `convp1`, `init_uni`, and `relu` are defined earlier in the script.
layers = [
    Conv((5, 5, 24), **convp1),
    Pooling(2, op='max'),
    Conv((3, 3, 32), **convp1),
    Pooling(2, op='max'),
    Conv((3, 3, 48), **convp1),
    Dropout(keep=.6),
    Pooling(2, op='max'),
    Affine(nout=64, init=init_uni, activation=relu),
    Dropout(keep=.4),
    # nout=2: binary output (presumably nodule vs. non-nodule — confirm with dataset)
    Affine(nout=2, init=init_uni, activation=Softmax())
]

lunaModel = Model(layers)
# Restore trained weights; layer shapes must match the list above exactly.
lunaModel.load_params('LUNA16_simpleCNN2_model.prm')

# neon_logger.display('Calculating metrics on the test set. This could take a while...')
# neon_logger.display('Misclassification error (test) = {:.2f}%'.format(lunaModel.eval(test_set, metric=Misclassification())[0] * 100))

# neon_logger.display('Precision/recall (test) = {}'.format(lunaModel.eval(test_set, metric=PrecisionRecall(num_classes=2))))
# neon_logger.display('Misclassification (test) = {}'.format(lunaModel.eval(test_set, metric=Misclassification())))
# neon_logger.display('Accuracy (test) = {}'.format(lunaModel.eval(test_set, metric=Accuracy())))
#dfOuts = pd.DataFrame(lunaModel.get_outputs(test_set))
#dfOuts.to_csv('outTest.csv')

#pr = lunaModel.eval(test_set, metric=PrecisionRecall(num_classes=2))
#print(pr.outputs)

# Ground-truth labels for the test files, one row per image.
dfTarget = pd.read_csv(testFileName, header=None, names=['file', 'label'])
ot = np.zeros(dfTarget.shape[0])
示例#24
0
    LSTM(hidden_size,
         init_glorot,
         activation=Tanh(),
         gate_activation=Logistic(),
         reset_cells=True),
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(nclass, init_glorot, bias=init_glorot, activation=Softmax())
]

# load the weights
print("Initialized the models - ")
model_new = Model(layers=layers)
print("Loading the weights from {0}".format(args.model_weights))

model_new.load_params(args.model_weights)
# Allocate buffers for inference on one review at a time.
model_new.initialize(dataset=(sentence_length, batch_size))

# setup buffers before accepting reviews
xdev = be.zeros((sentence_length, 1), dtype=np.int32)  # bsz is 1, feature size
xbuf = np.zeros((1, sentence_length), dtype=np.int32)
# Reserved vocabulary indices (IMDB-style convention): padding, start token,
# out-of-vocabulary marker, and the offset applied to real word indices.
oov = 2
start = 1
index_from = 3
pad_char = 0
# NOTE(review): the file handle from open() is never closed — consider a with-block.
vocab, rev_vocab = pickle.load(open(args.vocab_file, 'rb'))

while True:
    line = input('Enter a Review from testData.tsv file \n')

    # clean the input
示例#25
0
# configure callbacks
# Default checkpoint location, used only when none was given on the command line.
checkpoint_model_path = "~/image_caption2.pkl"
if args.callback_args['save_path'] is None:
    args.callback_args['save_path'] = checkpoint_model_path

# Checkpoint every epoch unless the user chose a different interval.
if args.callback_args['serialize'] is None:
    args.callback_args['serialize'] = 1

model = Model(layers=layers)

callbacks = Callbacks(model, **args.callback_args)

opt = RMSProp(decay_rate=0.997,
              learning_rate=0.0005,
              epsilon=1e-8,
              gradient_clip_value=1)

# train model
model.fit(train_set,
          optimizer=opt,
          num_epochs=num_epochs,
          cost=cost,
          callbacks=callbacks)

# load model (if exited) and evaluate bleu score on test set
if os.path.exists(args.callback_args['save_path']):
    model.load_params(args.callback_args['save_path'])
test_set = ImageCaptionTest(path=data_path)
sents, targets = test_set.predict(model)
test_set.bleu_score(sents, targets)
示例#26
0
    seq_len = 1

    if return_sequences is True:
        layers = [
            LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=False),
            Affine(train_set.nfeatures, init, bias=init, activation=Identity())
        ]
    else:
        layers = [
            LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(), reset_cells=False),
            RecurrentLast(),
            Affine(train_set.nfeatures, init, bias=init, activation=Identity())
        ]

    model_new = Model(layers=layers)
    model_new.load_params(args.save_path)
    model_new.initialize(dataset=(train_set.nfeatures, seq_len))

    output = np.zeros((train_set.nfeatures, num_predict))
    seed = time_series.train[:seed_seq_len]

    x = model_new.be.empty((train_set.nfeatures, seq_len))
    for s_in in seed:
        x.set(s_in.reshape(train_set.nfeatures, seq_len))
        y = model_new.fprop(x, inference=False)

    for i in range(num_predict):
        # Take last prediction and feed into next fprop
        pred = y.get()[:, -1]
        output[:, i] = pred
        x[:] = pred.reshape(train_set.nfeatures, seq_len)
示例#27
0
def test_model_serialize(backend_default, data):
    """Round-trip a multistream MLP through save_params/load_params and
    verify that outputs, layer states, and parameters are unchanged.

    Args:
        backend_default: pytest fixture providing the neon backend.
        data: pytest fixture with the path to the MNIST data.
    """
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=data)

    train_set = ArrayIterator([X_train, X_train],
                              y_train,
                              nclass=nclass,
                              lshape=(1, 28, 28))

    init_norm = Gaussian(loc=0.0, scale=0.01)

    # initialize model
    path1 = Sequential([
        Conv((5, 5, 16),
             init=init_norm,
             bias=Constant(0),
             activation=Rectlin()),
        Pooling(2),
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())
    ])
    path2 = Sequential([
        Affine(nout=100,
               init=init_norm,
               bias=Constant(0),
               activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())
    ])
    layers = [
        MergeMultistream(layers=[path1, path2], merge="stack"),
        Affine(nout=20, init=init_norm, batch_norm=True, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))
    ]

    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1,
                                            momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    mlp.initialize(train_set, cost=mlp.cost)
    n_test = 3
    num_epochs = 3
    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break

    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [layer.get_params_serialize()
                  for layer in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Serialize model
    mlp.save_params(tmp_save, keep_states=True)

    try:
        # Load model into a fresh Model built from the same layer objects
        mlp = Model(layers=layers)
        mlp.load_params(tmp_save)

        outputs = []
        pdicts = [layer.get_params_serialize()
                  for layer in mlp.layers_to_optimize]
        for i, (x, t) in enumerate(train_set):
            outputs.append(mlp.fprop(x, inference=True))
            if i > n_test:
                break

        # Check outputs, states, and params are the same
        for output, output_exp in zip(outputs, outputs_exp):
            assert np.allclose(output.get(), output_exp.get())

        for pd, pd_exp in zip(pdicts, pdicts_exp):
            for s, s_e in zip(pd['states'], pd_exp['states']):
                if isinstance(s, list):  # this is the batch norm case
                    for _s, _s_e in zip(s, s_e):
                        assert np.allclose(_s, _s_e)
                else:
                    assert np.allclose(s, s_e)
            for p, p_e in zip(pd['params'], pd_exp['params']):
                assert type(p) == type(p_e)
                if isinstance(p, list):  # this is the batch norm case
                    for _p, _p_e in zip(p, p_e):
                        assert np.allclose(_p, _p_e)
                elif isinstance(p, np.ndarray):
                    assert np.allclose(p, p_e)
                else:
                    assert p == p_e
    finally:
        # Remove the temp file even when an assertion above fails,
        # so a failed run does not leave stale state for the next one.
        os.remove(tmp_save)
示例#28
0
# Report both top-1 and top-5 misclassification on the validation set.
valmetric = TopKMisclassification(k=5)

# dummy optimizer for benchmarking
# training implementation coming soon
opt_gdm = GradientDescentMomentum(0.0, 0.0)
opt_biases = GradientDescentMomentum(0.0, 0.0)
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

# setup cost function as CrossEntropy
# Three costs for the three output branches (presumably auxiliary classifier
# heads — confirm against the model definition); zero weights on the extras
# so only the main path contributes.
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
                        GeneralizedCost(costfunc=CrossEntropyMulti()),
                        GeneralizedCost(costfunc=CrossEntropyMulti())],
                 weights=[1, 0., 0.])  # We only want to consider the CE of the main path

assert os.path.exists(args.model_file), 'script requires the trained weights file'
model.load_params(args.model_file)
model.initialize(test, cost)


# NOTE: Python 2 print statements below.
print 'running speed benchmark...'
model.benchmark(test, cost, opt)

print '\nCalculating performance on validation set...'
test.reset()
mets = model.eval(test, metric=valmetric)
print 'Validation set metrics:'
print 'LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (mets[0],
                                                                     (1.0-mets[1])*100,
                                                                     (1.0-mets[2])*100)
示例#29
0
# Path to the trained steering model and the command classes it predicts.
param_file_name = home_dir + "/ubuntu/model/trained_bot_model_32x32.prm"
class_names = ["forward", "left", "right", "backward"]    # from ROBOT-C bot.c
nclasses = len(class_names)
size = H, W

be = gen_backend(backend='cpu', batch_size=1)    # NN backend
init_uni = Uniform(low=-0.1, high=0.1)           # Unnecessary NN weight initialization
bn = True                                        # enable NN batch normalization
# The layer list must mirror the architecture the saved weights were trained with.
layers = [Conv((5, 5, 16), init=init_uni, activation=Rectlin(), batch_norm=bn),
          Pooling((2, 2)),
          Conv((3, 3, 32), init=init_uni, activation=Rectlin(), batch_norm=bn),
          Pooling((2, 2)),
          Affine(nout=50, init=init_uni, activation=Rectlin(), batch_norm=bn),
          Affine(nout=nclasses, init=init_uni, activation=Softmax())]
model = Model(layers=layers)
# load_states=False: weights only; optimizer state is not needed for inference.
model.load_params(param_file_name, load_states=False)

def usage():
    """Print command-line help for this script (Python 2 print statements)."""
    print "python connect_to_vex_cortex.py"
    print "  Raspberry Pi records video, commands from VEX Cortex 2.0"
    print "  -p " + file_name_prefix + ": file name prefix"
    print "  -d: display received commands for debug"
    print "  -w " + str(w) + ": video width"
    print "  -h " + str(h) + ": video height"
    print "  -f " + str(fps) + ": video FPS, 0 for camera default"
    print "  -q " + str(quality) + ": quality to record video, 1..40"
    print "  -b " + str(bitrate) + ": bitrate e.g. 15000000, 0 for unlimited"
    print "  -i " + str(iso) + ": ISO 0 | 100 ... 800, see picamera doc, 0 for camera default"
    print "  -m: horizontal mirror"
    print "  -v: vertical mirror"
    print "  -s: shut down system on exit (must run as super user)"
示例#30
0
class DQNNeon(Learner):
    """ This class is an implementation of the DQN network based on Neon.

    The modules that interact with the agent, the replay memory and the
    statistic calls are implemented here, taking the individual requirements
    of the Lasagne framework into account. The code is adapted from:
    https://github.com/tambetm/simple_dqn

    Attributes:
        input_shape (tuple[int]): Dimension of the network input.
        dummy_batch (numpy.ndarray): Dummy batch used to calculate Q-values for single states.
        batch_norm (bool): Indicates if normalization is wanted for a certain layer (default=False).
        be (neon.backends.nervanagpu.NervanaGPU): Describes the backend for the Neon implementation.
        input (neon.backends.nervanagpu.GPUTensor): Definition of network input shape.
        targets(neon.backends.nervanagpu.GPUTensor): Definition of network output shape.
        model (neon.models.model.Model): Generated Neon model.
        target_model (neon.models.model.Model): Generated target Neon model.
        cost_func (neon.layers.layer.GeneralizedCost): Cost function for model training.
        callback (Statistics): Hook for the statistics object to pass train and test information.

    Note:
        More attributes of this class are defined in the base class Learner.

    """

    def __init__(self, env, args, rng, name = "DQNNeon"):
        """ Initializes a network based on the Neon framework.

        Args:
            env (AtariEnv): The environment in which the agent actuates.
            args (argparse.Namespace): All settings either with a default value or set via command line arguments.
            rng (mtrand.RandomState): initialized Mersenne Twister pseudo-random number generator.
            name (str): The name of the network object.

        Note:
            This function should always call the base class first to initialize
            the common values for the networks.
        """
        _logger.info("Initializing new object of type " + str(type(self).__name__))
        super(DQNNeon, self).__init__(env, args, rng, name)
        self.input_shape = (self.sequence_length,) + self.frame_dims + (self.batch_size,)
        self.dummy_batch = np.zeros((self.batch_size, self.sequence_length) + self.frame_dims, dtype=np.uint8)
        self.batch_norm = args.batch_norm

        self.be = gen_backend(
                backend = args.backend,
                batch_size = args.batch_size,
                rng_seed = args.random_seed,
                device_id = args.device_id,
                datatype = np.dtype(args.datatype).type,
                stochastic_round = args.stochastic_round)

        # prepare tensors once and reuse them
        self.input = self.be.empty(self.input_shape)
        self.input.lshape = self.input_shape # HACK: needed for convolutional networks
        self.targets = self.be.empty((self.output_shape, self.batch_size))

        # create model
        layers = self._create_layer()
        self.model = Model(layers = layers)
        self.cost_func = GeneralizedCost(costfunc = SumSquared())
        # Bug fix
        for l in self.model.layers.layers:
            l.parallelism = 'Disabled'
        self.model.initialize(self.input_shape[:-1], self.cost_func)

        self._set_optimizer()

        # FIX: identity comparison with None instead of `not ... == None`.
        if self.args.load_weights is not None:
            self.load_weights(self.args.load_weights)

        # create target model
        if self.target_update_frequency:
            layers = self._create_layer()
            self.target_model = Model(layers)
            # Bug fix
            for l in self.target_model.layers.layers:
                l.parallelism = 'Disabled'
            self.target_model.initialize(self.input_shape[:-1])
        else:
            # Without periodic target updates, the target net IS the live net.
            self.target_model = self.model

        self.callback = None
        _logger.debug("%s" % self)

    def _create_layer(self):
        """ Build a network consistent with the DeepMind Nature paper. """
        _logger.debug("Output shape = %d" % self.output_shape)
        # create network
        init_norm = Gaussian(loc=0.0, scale=0.01)
        layers = []
        # The first hidden layer convolves 32 filters of 8x8 with stride 4 with the input image and applies a rectifier nonlinearity.
        layers.append(
                Conv((8, 8, 32),
                strides=4,
                init=init_norm,
                activation=Rectlin(),
                batch_norm=self.batch_norm))
        # The second hidden layer convolves 64 filters of 4x4 with stride 2, again followed by a rectifier nonlinearity.
        layers.append(
                Conv((4, 4, 64),
                strides=2,
                init=init_norm,
                activation=Rectlin(),
                batch_norm=self.batch_norm))
        # This is followed by a third convolutional layer that convolves 64 filters of 3x3 with stride 1 followed by a rectifier.
        layers.append(
                Conv((3, 3, 64),
                strides=1,
                init=init_norm,
                activation=Rectlin(),
                batch_norm=self.batch_norm))
        # The final hidden layer is fully-connected and consists of 512 rectifier units.
        layers.append(
                Affine(
                    nout=512,
                    init=init_norm,
                    activation=Rectlin(),
                    batch_norm=self.batch_norm))
        # The output layer is a fully-connected linear layer with a single output for each valid action.
        layers.append(
                Affine(
                    nout= self.output_shape,
                    init = init_norm))
        return layers

    def _set_optimizer(self):
        """ Initializes the selected optimization algorithm.

        Raises:
            ValueError: If the configured optimizer name is not supported.
        """
        _logger.debug("Optimizer = %s" % str(self.args.optimizer))
        if self.args.optimizer == 'rmsprop':
            self.optimizer = RMSProp(
                    learning_rate = self.args.learning_rate,
                    decay_rate = self.args.decay_rate,
                    stochastic_round = self.args.stochastic_round)
        elif self.args.optimizer == 'adam':
            self.optimizer = Adam(
                    learning_rate = self.args.learning_rate,
                    stochastic_round = self.args.stochastic_round)
        elif self.args.optimizer == 'adadelta':
            self.optimizer = Adadelta(
                    decay = self.args.decay_rate,
                    stochastic_round = self.args.stochastic_round)
        else:
            # FIX: original code was `assert false, ...` which raised a
            # NameError (`false` is undefined) instead of a meaningful error.
            raise ValueError("Unknown optimizer: %s" % str(self.args.optimizer))

    def _prepare_network_input(self, states):
        """ Transforms and normalizes the states from one minibatch.

        Args:
            states (): a set of states with the size of minibatch
        """
        _logger.debug("Normalizing and transforming input")
        # change order of axes to match what Neon expects
        states = np.transpose(states, axes = (1, 2, 3, 0))
        # copy() shouldn't be necessary here, but Neon doesn't work otherwise
        self.input.set(states.copy())
        # normalize network input between 0 and 1
        # NOTE(review): self.grayscales is not defined in this class --
        # presumably provided by the Learner base class; verify.
        self.be.divide(self.input, self.grayscales, self.input)

    def train(self, minibatch, epoch):
        """ Prepare, perform and document a complete train step for one minibatch.

        Args:
            minibatch (numpy.ndarray): Mini-batch of states, shape=(batch_size,sequence_length,frame_width,frame_height)
            epoch (int): Current train epoch
        """
        _logger.debug("Complete trainig step for one minibatch")
        prestates, actions, rewards, poststates, terminals = minibatch
        assert len(prestates.shape) == 4
        assert len(poststates.shape) == 4
        assert len(actions.shape) == 1
        assert len(rewards.shape) == 1
        assert len(terminals.shape) == 1
        assert prestates.shape == poststates.shape
        assert prestates.shape[0] == actions.shape[0] == rewards.shape[0] == poststates.shape[0] == terminals.shape[0]
        # feed-forward pass for poststates to get Q-values
        self._prepare_network_input(poststates)
        postq = self.target_model.fprop(self.input, inference = True)
        assert postq.shape == (self.output_shape, self.batch_size)
        # calculate max Q-value for each poststate
        maxpostq = self.be.max(postq, axis=0).asnumpyarray()
        assert maxpostq.shape == (1, self.batch_size)
        # average maxpostq for stats
        maxpostq_avg = maxpostq.mean()
        # feed-forward pass for prestates
        self._prepare_network_input(prestates)
        preq = self.model.fprop(self.input, inference = False)
        assert preq.shape == (self.output_shape, self.batch_size)
        # make copy of prestate Q-values as targets
        targets = preq.asnumpyarray()
        # clip rewards between -1 and 1
        rewards = np.clip(rewards, self.min_reward, self.max_reward)
        # update Q-value targets for each state only at actions taken
        for i, action in enumerate(actions):
            if terminals[i]:
                targets[action, i] = float(rewards[i])
            else:
                targets[action, i] = float(rewards[i]) + self.discount_rate * maxpostq[0,i]
        # copy targets to GPU memory
        self.targets.set(targets)
        # calculate errors
        errors = self.cost_func.get_errors(preq, self.targets)
        assert errors.shape == (self.output_shape, self.batch_size)
        # average error where there is a error (should be 1 in every row)
        #TODO: errors_avg = np.sum(errors)/np.size(errors[errors>0.])
        # clip errors
        if self.clip_error:
            self.be.clip(errors, -self.clip_error, self.clip_error, out = errors)
        # calculate cost, just in case
        cost = self.cost_func.get_cost(preq, self.targets)
        assert cost.shape == (1,1)
        # perform back-propagation of gradients
        self.model.bprop(errors)
        # perform optimization
        self.optimizer.optimize(self.model.layers_to_optimize, epoch)
        # increase number of weight updates (needed for target clone interval)
        self.update_iterations += 1
        if self.target_update_frequency and self.update_iterations % self.target_update_frequency == 0:
            self._copy_theta()
            if isinstance(cost, np.ndarray):
                _logger.info("Network update #%d: Cost = %s, Avg Max Q-value = %s" % (self.update_iterations, str(cost[0][0]), str(maxpostq_avg)))
            else:
                _logger.info("Network update #%d: Cost = %s, Avg Max Q-value = %s" % (self.update_iterations, str(cost.asnumpyarray()[0][0]), str(maxpostq_avg)))
        # update statistics
        if self.callback:
            if isinstance(cost, np.ndarray):
                self.callback.from_learner(cost[0,0], maxpostq_avg)
            else:
                self.callback.from_learner(cost.asnumpyarray()[0,0], maxpostq_avg)

    def get_Q(self, state):
        """ Calculates the Q-values for one mini-batch.

        Args:
            state(numpy.ndarray): Single state, shape=(sequence_length,frame_width,frame_height).

        Returns:
            q_values (numpy.ndarray): Results for first element of mini-batch from one forward pass through the network, shape=(self.output_shape,)
        """
        _logger.debug("State shape = %s" % str(state.shape))
        # minibatch is full size, because Neon doesn't let change the minibatch size
        # so we need to run 32 forward steps to get the one we actually want
        self.dummy_batch[0] = state
        states = self.dummy_batch
        assert states.shape == ((self.batch_size, self.sequence_length,) + self.frame_dims)
        # calculate Q-values for the states
        self._prepare_network_input(states)
        qvalues = self.model.fprop(self.input, inference = True)
        assert qvalues.shape == (self.output_shape, self.batch_size)
        _logger.debug("Qvalues: %s" % (str(qvalues.asnumpyarray()[:,0])))
        return qvalues.asnumpyarray()[:,0]

    def _copy_theta(self):
        """ Copies the weights of the current network to the target network. """
        _logger.debug("Copying weights")
        pdict = self.model.get_description(get_weights=True, keep_states=True)
        self.target_model.deserialize(pdict, load_states=True)

    def save_weights(self, target_dir, epoch):
        """ Saves the current network parameters to disk.

        Args:
            target_dir (str): Directory where the network parameters are stored for each episode.
            epoch (int): Current epoch.
        """
        filename = "%s_%s_%s_%d.prm" % (str(self.args.game.lower()), str(self.args.learner_type.lower()), str(self.args.optimizer.lower()), (epoch + 1))
        self.model.save_params(os.path.join(target_dir, filename))

    def load_weights(self, source_file):
        """ Loads the network parameters from a given file.

        Args:
            source_file (str): Complete path to a file with network parameters.
        """
        self.model.load_params(source_file)
示例#31
0
# Load previously recorded offset data; start with an empty memory when absent.
try:
    offset_memory = replay_memory.load(bot_params.offset_data_path)
    print "offsets loaded"
except IOError:
    offset_memory = replay_memory.OffsetMemory()

# Single linear unit plus bias: a one-dimensional regression model.
init_norm = Gaussian(loc=-0.1, scale=0.1)
layers = [Linear(1, init=init_norm), Bias(init=init_norm)]

mlp = Model(layers=layers)

cost = GeneralizedCost(costfunc=SumSquared())
optimizer = GradientDescentMomentum(0.5, momentum_coef=0.9)
# Best-effort restore of previous aiming weights; fall back to the fresh init.
try:
    mlp.load_params(bot_params.aim_weights_path)
except IOError:
    print "can't load aiming weights"


def get_offset_manual(predictions):
    #enemy_pos = replay_memory.clean_values_toone(predictions)[0, 0]
    x = 0.
    c = 0
    for i in range(0, len(predictions)):
        if predictions[i] > 0.97:
            x += (i + 1)
            c += 1
            if i in (0, 1, 7, 8):
                print "BIG OFFSET", i
    enemy_pos = x / c - 5
示例#32
0
# CPU backend with a fixed mini-batch of 30 for inference.
be = gen_backend(backend='cpu', batch_size=30)
init_uni = Uniform(low=-0.1, high=0.1)
# Three conv/pool stages followed by two affine layers; 11 output classes.
layers = [
    Conv(fshape=(4, 4, 16), init=init_uni, activation=Rectlin()),
    Pooling(fshape=2, strides=2),
    Conv(fshape=(4, 4, 32), init=init_uni, activation=Rectlin()),
    Pooling(fshape=2, strides=2),
    Conv(fshape=(4, 4, 32), init=init_uni, activation=Rectlin()),
    Pooling(fshape=2, strides=2),
    Affine(nout=500, init=init_uni, activation=Rectlin()),
    Affine(nout=11, init=init_uni, activation=Softmax())
]

model = Model(layers)

# Restore trained weights; architecture above must match the saved model.
model.load_params('model.pkl')
data = readfile('PreImage', 'label.csv')
X_test = data.test_data
test_set = ArrayIterator(X_test, None, nclass=11, lshape=(1, 200, 200))
true = data.test_label
out = model.get_outputs(test_set)
row = len(X_test)
pred = np.zeros((row, 1))
i = 0
# argmax over class probabilities for each test sample
while i < row:
    pred[i] = out[i].argmax()
    i = i + 1
# labels appear to be 1-based while argmax is 0-based, hence the +1 shift
pred = pred + 1
loss = abs(true - pred)
print(loss)
count = 0
示例#33
0
class ModelRunnerNeon():
    def __init__(self, args,  max_action_no, batch_dimension):
        """Set up the neon backend, train/target networks, cost and optimizer.

        Args:
            args: parsed command-line options (batch size, optimizer choice,
                replay settings, learning rate, ...).
            max_action_no (int): number of discrete actions; width of the
                networks' output layer.
            batch_dimension: replay-batch shape with batch first -- transposed
                below so the batch axis comes last, as neon expects.
        """
        self.args = args
        self.train_batch_size = args.train_batch_size
        self.discount_factor = args.discount_factor
        self.use_gpu_replay_mem = args.use_gpu_replay_mem
        
        self.be = gen_backend(backend='gpu',             
                         batch_size=self.train_batch_size)

        # neon wants the batch dimension last; move axis 0 to the end.
        self.input_shape = (batch_dimension[1], batch_dimension[2], batch_dimension[3], batch_dimension[0])
        self.input = self.be.empty(self.input_shape)
        self.input.lshape = self.input_shape # HACK: needed for convolutional networks
        self.targets = self.be.empty((max_action_no, self.train_batch_size))

        if self.use_gpu_replay_mem:
            # Keep raw frames on-device as uint8; converted to float in set_input.
            self.history_buffer = self.be.zeros(batch_dimension, dtype=np.uint8)
            self.input_uint8 = self.be.empty(self.input_shape, dtype=np.uint8)
        else:
            self.history_buffer = np.zeros(batch_dimension, dtype=np.float32)

        self.train_net = Model(self.create_layers(max_action_no))
        self.cost = GeneralizedCost(costfunc=SumSquared())
        # Bug fix
        for l in self.train_net.layers.layers:
            l.parallelism = 'Disabled'
        self.train_net.initialize(self.input_shape[:-1], self.cost)
        
        # Separate target network (no cost) used for stable Q-targets.
        self.target_net = Model(self.create_layers(max_action_no))
        # Bug fix
        for l in self.target_net.layers.layers:
            l.parallelism = 'Disabled'
        self.target_net.initialize(self.input_shape[:-1])

        if self.args.optimizer == 'Adam':        # Adam
            # NOTE(review): both Adam betas are taken from args.rms_decay --
            # looks copy-pasted from the RMSProp branch; confirm intended.
            self.optimizer = Adam(beta_1=args.rms_decay,
                                            beta_2=args.rms_decay,
                                            learning_rate=args.learning_rate)
        else:		# Neon RMSProp
            self.optimizer = RMSProp(decay_rate=args.rms_decay,
                                            learning_rate=args.learning_rate)

        self.max_action_no = max_action_no
        self.running = True

    def get_initializer(self, input_size):
        """Pick a weight initializer according to args.dnn_initializer.

        'xavier' -> Xavier; 'fan_in' -> Uniform scaled by 1/sqrt(input_size);
        anything else -> Gaussian(0, 0.01).
        """
        scheme = self.args.dnn_initializer
        if scheme == 'xavier':
            return Xavier()
        if scheme == 'fan_in':
            bound = 1.0 / math.sqrt(input_size)
            return Uniform(low=-bound, high=bound)
        return Gaussian(0, 0.01)
            
    def create_layers(self, max_action_no):
        """Build the DQN stack: three conv layers then two affine layers.

        The final affine layer has one linear output per action (no
        activation, since it regresses Q-values).
        """
        # (filter shape, stride, fan-in used to scale the initializer)
        conv_specs = [
            ((8, 8, 32), 4, 4 * 8 * 8),
            ((4, 4, 64), 2, 32 * 4 * 4),
            ((3, 3, 64), 1, 64 * 3 * 3),
        ]
        stack = []
        for fshape, stride, fan_in in conv_specs:
            init = self.get_initializer(input_size=fan_in)
            stack.append(Conv(fshape=fshape, strides=stride, init=init,
                              bias=init, activation=Rectlin()))

        init = self.get_initializer(input_size=7 * 7 * 64)
        stack.append(Affine(nout=512, init=init, bias=init, activation=Rectlin()))

        init = self.get_initializer(input_size=512)
        stack.append(Affine(nout=max_action_no, init=init, bias=init))

        return stack
        
    def clip_reward(self, reward):
        if reward > self.args.clip_reward_high:
            return self.args.clip_reward_high
        elif reward < self.args.clip_reward_low:
            return self.args.clip_reward_low
        else:
            return reward

    def set_input(self, data):
        """Copy a replay batch into self.input, batch-last and scaled to [0, 1].

        Args:
            data: batch of uint8 frames with batch first; transposed with
                axes (1, 2, 3, 0) so the batch axis ends up last, then
                divided by 255.
        """
        if self.use_gpu_replay_mem:
            # data already lives on the device; transpose on-device, then
            # rescale into the float input tensor.
            self.be.copy_transpose(data, self.input_uint8, axes=(1, 2, 3, 0))
            self.input[:] = self.input_uint8 / 255
        else:
            # host-side numpy path: transpose/copy on CPU, then divide in place
            self.input.set(data.transpose(1, 2, 3, 0).copy())
            self.be.divide(self.input, 255, self.input)

    def predict(self, history_buffer):
        """Return the train network's Q-values for one state as a 1-D array."""
        self.set_input(history_buffer)
        q_values = self.train_net.fprop(self.input, inference=True)
        return q_values.T.asnumpyarray()[0]

    def print_weights(self):
        # Intentional no-op: weight inspection is not implemented for the
        # neon runner (kept to satisfy the shared runner interface).
        pass

    def train(self, minibatch, replay_memory, learning_rate, debug):
        """Run one DQN gradient step on a sampled minibatch.

        Builds Q-learning targets with target_net (optionally double-DQN),
        regresses train_net's outputs toward them, and applies one optimizer
        step on the (optionally clipped / importance-weighted) error.

        NOTE(review): the learning_rate parameter is unused here -- the
        optimizer was configured with args.learning_rate in __init__.
        """
        if self.args.prioritized_replay == True:
            prestates, actions, rewards, poststates, terminals, replay_indexes, heap_indexes, weights = minibatch
        else:
            prestates, actions, rewards, poststates, terminals = minibatch
        
        # Get Q*(s, a) with targetNet
        self.set_input(poststates)
        post_qvalue = self.target_net.fprop(self.input, inference=True).T.asnumpyarray()
        
        if self.args.double_dqn == True:
            # Get Q*(s, a) with trainNet
            post_qvalue2 = self.train_net.fprop(self.input, inference=True).T.asnumpyarray()
        
        # Get Q(s, a) with trainNet
        self.set_input(prestates)
        pre_qvalue = self.train_net.fprop(self.input, inference=False)
        
        # Start the label from the network's own predictions so that actions
        # not taken in the batch contribute zero error.
        label = pre_qvalue.asnumpyarray().copy()
        for i in range(0, self.train_batch_size):
            if self.args.clip_reward:
                reward = self.clip_reward(rewards[i])
            else:
                reward = rewards[i]
            if terminals[i]:
                # terminal state: target is the immediate reward only
                label[actions[i], i] = reward
            else:
                if self.args.double_dqn == True:
                    # double DQN: train_net selects the action, target_net scores it
                    max_index = np.argmax(post_qvalue2[i])
                    label[actions[i], i] = reward + self.discount_factor* post_qvalue[i][max_index]
                else:
                    label[actions[i], i] = reward + self.discount_factor* np.max(post_qvalue[i])

        # copy targets to GPU memory
        self.targets.set(label)
    
        delta = self.cost.get_errors(pre_qvalue, self.targets)
        
        if self.args.prioritized_replay == True:
            # Re-weight each error by its importance-sampling weight and push
            # the new TD magnitude back into the priority heap.
            delta_value = delta.asnumpyarray()
            for i in range(self.train_batch_size):
                if debug:
                    print 'weight[%s]: %.5f, delta: %.5f, newDelta: %.5f' % (i, weights[i], delta_value[actions[i], i], weights[i] * delta_value[actions[i], i]) 
                replay_memory.update_td(heap_indexes[i], abs(delta_value[actions[i], i]))
                delta_value[actions[i], i] = weights[i] * delta_value[actions[i], i]
            delta.set(delta_value.copy())
          
        if self.args.clip_loss:
            # clip the error (not the gradient) to [-1, 1] for stability
            self.be.clip(delta, -1.0, 1.0, out = delta)
                
        self.train_net.bprop(delta)
        self.optimizer.optimize(self.train_net.layers_to_optimize, epoch=0)

    def update_model(self):
        """Copy train_net's weights (and running stats) into target_net.

        States are serialized along with the weights because batch
        normalization needs its running statistics to be transferred too.
        """
        description = self.train_net.get_description(get_weights=True, keep_states=True)
        self.target_net.deserialize(description, load_states=True)

    def finish_train(self):
        self.running = False
    
    def load(self, file_name):
        """Restore train_net parameters from file_name and sync target_net.

        Args:
            file_name (str): path to a previously saved parameter file.
        """
        self.train_net.load_params(file_name)
        # keep the target network consistent with the freshly loaded weights
        self.update_model()
        
    def save(self, file_name):
        """Serialize train_net parameters to file_name.

        Args:
            file_name (str): destination path for the parameter file.
        """
        self.train_net.save_params(file_name)