Code example #1
File: likelihoods.py Project: GPflow/GPflow
    def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
        # work out what the mean and variance are of the indicated latent function.
        oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1.0, 0.0), float_type)
        mu_selected = tf.reduce_sum(oh_on * mu, 1)
        var_selected = tf.reduce_sum(oh_on * var, 1)

        # generate Gauss Hermite grid
        X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
            tf.sqrt(tf.clip_by_value(2.0 * var_selected, 1e-10, np.inf)), (-1, 1)
        )

        # compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
        dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
            tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2
        )
        cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))

        cdfs = cdfs * (1 - 2e-4) + 1e-4

        # blank out all the distances on the selected latent function
        oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0.0, 1.0), float_type)
        cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)

        # take the product over the latent functions, and the sum over the GH grid.
        return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
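The gh_x / gh_w arguments above are Gauss-Hermite quadrature nodes and weights. A minimal sketch of how they can be generated with NumPy follows; GPflow ships its own helper for this, so the name and shapes here are illustrative only.

import numpy as np

def hermgauss(n):
    # Nodes x_i and weights w_i for integrals of the form  int f(x) * exp(-x^2) dx.
    x, w = np.polynomial.hermite.hermgauss(n)
    return x, w

gh_x, gh_w = hermgauss(20)
# prob_is_largest evaluates the product of Gaussian CDFs at the shifted/scaled
# nodes mu + sqrt(2 * var) * gh_x and combines them with weights gh_w / sqrt(pi).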
Code example #2
    def predict(self, test_features, test_labels, result_path):

        train_labels = tf.one_hot(self.train_labels, depth=2, on_value=1, off_value=0)
        test_labels = tf.one_hot(test_labels, depth=2, on_value=1, off_value=0)

        init = tf.global_variables_initializer()

        # Start training
        with tf.Session() as sess:

            # Run the initializer
            sess.run(init)

            y_, y = sess.run([test_labels, train_labels])

            # loop over test data
            for index in range(len(test_features)):

                feed_dict = {self.xtr: self.train_features, self.xte: test_features[index, :]}

                nn_index = sess.run(self.prediction, feed_dict=feed_dict)

                print('Test [{}] Actual Class: {}, Predicted Class : {}'.format(index, np.argmax(y_[index]),
                                                                                np.argmax(y[nn_index])))

                self.save_labels(predictions=np.argmax(y[nn_index]), actual=np.argmax(y_[index]),
                                 result_path=result_path, step=index, phase='testing')

                if np.argmax(y[nn_index]) == np.argmax(y_[index]):
                    self.accuracy += 1. / len(test_features)

        print('Accuracy : {}'.format(self.accuracy))
Code example #3
def load_mnist(path, is_training):
    fd = open(os.path.join(cfg.dataset, 'train-images-idx3-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float)

    fd = open(os.path.join(cfg.dataset, 'train-labels-idx1-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape((60000)).astype(np.float)

    fd = open(os.path.join(cfg.dataset, 't10k-images-idx3-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float)

    fd = open(os.path.join(cfg.dataset, 't10k-labels-idx1-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape((10000)).astype(np.float)

    # normalization and convert to a tensor [60000, 28, 28, 1]
    trX = tf.convert_to_tensor(trX / 255., tf.float32)

    # => [num_samples, 10]
    trY = tf.one_hot(trY, depth=10, axis=1, dtype=tf.float32)
    teY = tf.one_hot(teY, depth=10, axis=1, dtype=tf.float32)

    if is_training:
        return trX, trY
    else:
        return teX / 255., teY
Code example #4
	def build(self, sampling):
		if sampling == True:
			batch_size, num_steps = 1, 1
		else:
			batch_size = self.__batch_size
			num_steps = self.__num_steps
		tf_x = tf.placeholder(tf.int32, shape=[batch_size, num_steps], name='tf_x')
		tf_y = tf.placeholder(tf.int32, shape=[batch_size, num_steps], name='tf_y')
		tf_keepprob = tf.placeholder(tf.float32, name='tf_keepprob')
		# one-hot encoding:
		x_onehot = tf.one_hot(tf_x, depth=self.__num_classes)
		y_onehot = tf.one_hot(tf_y, depth=self.__num_classes)
		# build the multi-layer RNN cells
		cells = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(self.__lstm_size), output_keep_prob=tf_keepprob) for _ in range(self.__num_layers)])
		# Define the initial state
		self.__initial_state = cells.zero_state(batch_size, tf.float32)
		# Run each sequence step through the RNN
		lstm_outputs, self.__final_state =  tf.nn.dynamic_rnn(cells, x_onehot, initial_state = self.__initial_state)
		print(' << lstm_outputs >>', lstm_outputs)
		seq_output_reshaped = tf.reshape(lstm_outputs, shape=[-1, self.__lstm_size], name='seq_output_reshaped')
		logits = tf.layers.dense(inputs=seq_output_reshaped, units=self.__num_classes, activation=None, name='logits')
		proba = tf.nn.softmax(logits, name='probabilities')
		y_reshaped = tf.reshape(y_onehot, shape=[-1, self.__num_classes], name='y_reshaped')
		cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped), name='cost')
		# Gradient clipping to avoid 'exploding gradients'
		tvars = tf.trainable_variables()
		grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), self.__grad_clip)
		optimizer = tf.train.AdamOptimizer(self.__learning_rate)
		train_op = optimizer.apply_gradients(zip(grads, tvars), name='train_op')
Code example #5
File: dqn_model.py Project: et0803/tensorforce
    def create_tf_operations(self, config):
        super(DQNModel, self).create_tf_operations(config)

        num_actions = {name: action.num_actions for name, action in config.actions}

        # Training network
        with tf.variable_scope('training'):
            self.training_network = NeuralNetwork(config.network, inputs=self.state)

            self.internal_inputs.extend(self.training_network.internal_inputs)
            self.internal_outputs.extend(self.training_network.internal_outputs)
            self.internal_inits.extend(self.training_network.internal_inits)
            training_output = dict()

            for action in self.action:
                training_output[action] = layers['linear'](x=self.training_network.output, size=num_actions[action])
                self.action_taken[action] = tf.argmax(training_output[action], axis=1)

        # Target network
        with tf.variable_scope('target'):
            self.target_network = NeuralNetwork(config.network, inputs=self.state)
            self.internal_inputs.extend(self.target_network.internal_inputs)
            self.internal_outputs.extend(self.target_network.internal_outputs)
            self.internal_inits.extend(self.target_network.internal_inits)
            target_value = dict()

            for action in self.action:
                target_output = layers['linear'](x=self.target_network.output, size=num_actions[action])
                if config.double_dqn:
                    selector = tf.one_hot(self.action_taken[action], num_actions[action])
                    target_value[action] = tf.reduce_sum(tf.multiply(target_output, selector), axis=1)
                else:
                    target_value[action] = tf.reduce_max(target_output, axis=1)

        with tf.name_scope('update'):
            for action in self.action:
                # One_hot tensor of the actions that have been taken
                action_one_hot = tf.one_hot(self.action[action][:-1], num_actions[action])
                # Training output, so we get the expected rewards given the actual states and actions
                q_value = tf.reduce_sum(training_output[action][:-1] * action_one_hot, axis=1)

                # Surrogate loss as the mean squared error between actual observed rewards and expected rewards
                q_target = self.reward[:-1] + (1.0 - tf.cast(self.terminal[:-1], tf.float32)) * self.discount * target_value[action][1:]
                delta = q_target - q_value
                self.loss_per_instance = tf.square(delta)

                # If gradient clipping is used, calculate the huber loss
                if config.clip_gradients > 0.0:
                    huber_loss = tf.where(tf.abs(delta) < config.clip_gradients, 0.5 * self.loss_per_instance, tf.abs(delta) - 0.5)
                    loss = tf.reduce_mean(huber_loss)
                else:
                    loss = tf.reduce_mean(self.loss_per_instance)
                tf.losses.add_loss(loss)

        # Update target network
        with tf.name_scope("update_target"):
            self.target_network_update = list()
            for v_source, v_target in zip(self.training_network.variables, self.target_network.variables):
                update = v_target.assign_sub(config.update_target_weight * (v_target - v_source))
                self.target_network_update.append(update)
Code example #6
File: memory.py Project: tsingcoo/models
  def make_update_op(self, upd_idxs, upd_keys, upd_vals,
                     batch_size, use_recent_idx, intended_output):
    """Function that creates all the update ops."""
    base_update_op = super(LSHMemory, self).make_update_op(
        upd_idxs, upd_keys, upd_vals,
        batch_size, use_recent_idx, intended_output)

    # compute hash slots to be updated
    hash_slot_idxs = self.get_hash_slots(upd_keys)

    # make updates
    update_ops = []
    with tf.control_dependencies([base_update_op]):
      for i, slot_idxs in enumerate(hash_slot_idxs):
        # for each slot, choose which entry to replace
        entry_idx = tf.random_uniform([batch_size],
                                      maxval=self.num_per_hash_slot,
                                      dtype=tf.int32)
        entry_mul = 1 - tf.one_hot(entry_idx, self.num_per_hash_slot,
                                   dtype=tf.int32)
        entry_add = (tf.expand_dims(upd_idxs, 1) *
                     tf.one_hot(entry_idx, self.num_per_hash_slot,
                                dtype=tf.int32))

        mul_op = tf.scatter_mul(self.hash_slots[i], slot_idxs, entry_mul)
        with tf.control_dependencies([mul_op]):
          add_op = tf.scatter_add(self.hash_slots[i], slot_idxs, entry_add)
          update_ops.append(add_op)

    return tf.group(*update_ops)
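The scatter_mul / scatter_add pair above implements an overwrite of a single entry per hash slot: multiplying by 1 - one_hot clears the chosen position and adding upd_idxs * one_hot writes the new index there. A toy NumPy illustration of the same arithmetic (values and shapes are made up, not the memory module's API):

import numpy as np

slot = np.array([7, 8, 9])                     # current contents of one hash slot
entry_idx, new_val = 1, 42                     # entry to replace and the new index to store
mask = 1 - np.eye(3, dtype=int)[entry_idx]     # [1, 0, 1], like 1 - tf.one_hot(...)
slot = slot * mask + new_val * np.eye(3, dtype=int)[entry_idx]
print(slot)                                    # [ 7 42  9]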
Code example #7
File: train-atari.py Project: tobyma/tensorpack
    def build_graph(self, state, action, futurereward, action_prob):
        logits, value = self._get_NN_prediction(state)
        value = tf.squeeze(value, [1], name='pred_value')  # (B,)
        policy = tf.nn.softmax(logits, name='policy')
        is_training = get_current_tower_context().is_training
        if not is_training:
            return
        log_probs = tf.log(policy + 1e-6)

        log_pi_a_given_s = tf.reduce_sum(
            log_probs * tf.one_hot(action, NUM_ACTIONS), 1)
        advantage = tf.subtract(tf.stop_gradient(value), futurereward, name='advantage')

        pi_a_given_s = tf.reduce_sum(policy * tf.one_hot(action, NUM_ACTIONS), 1)  # (B,)
        importance = tf.stop_gradient(tf.clip_by_value(pi_a_given_s / (action_prob + 1e-8), 0, 10))

        policy_loss = tf.reduce_sum(log_pi_a_given_s * advantage * importance, name='policy_loss')
        xentropy_loss = tf.reduce_sum(policy * log_probs, name='xentropy_loss')
        value_loss = tf.nn.l2_loss(value - futurereward, name='value_loss')

        pred_reward = tf.reduce_mean(value, name='predict_reward')
        advantage = tf.sqrt(tf.reduce_mean(tf.square(advantage)), name='rms_advantage')
        entropy_beta = tf.get_variable('entropy_beta', shape=[],
                                       initializer=tf.constant_initializer(0.01), trainable=False)
        cost = tf.add_n([policy_loss, xentropy_loss * entropy_beta, value_loss])
        cost = tf.truediv(cost, tf.cast(tf.shape(futurereward)[0], tf.float32), name='cost')
        summary.add_moving_summary(policy_loss, xentropy_loss,
                                   value_loss, pred_reward, advantage,
                                   cost, tf.reduce_mean(importance, name='importance'))
        return cost
Code example #8
File: addition.py Project: PFCM/datasets
def get_online_sequences(sequence_length, batch_size):
    """Gets tensor which constantly produce new random examples.

    Args:
        sequence_length: total length of the sequences.
        batch_size: how many at a time.

    Returns:
        (data, targets): data is `[sequence_length, batch_size, 2]` and targets
            are `[batch_size]`.
    """
    # getting the random channel is easy
    random_data = tf.random_uniform([sequence_length, batch_size, 1],
                                    minval=0.0, maxval=1.0)
    # now we need a random marker in each half of the data
    random_index_1 = tf.random_uniform([1, batch_size], minval=0,
                                       maxval=sequence_length//2,
                                       dtype=tf.int32)
    random_index_2 = tf.random_uniform([1, batch_size], minval=0,
                                       maxval=sequence_length//2,
                                       dtype=tf.int32)
    markers = tf.concat(axis=2, values=[tf.one_hot(random_index_1, sequence_length//2),
                            tf.one_hot(random_index_2, sequence_length//2)])
    markers = tf.transpose(markers)
    targets = tf.reduce_sum(random_data * markers,
                            axis=0)
    return tf.concat(axis=2, values=[random_data, markers]), tf.squeeze(targets)
Code example #9
File: DQN.py Project: xhrwang/tensorpack
    def _build_graph(self, inputs, is_training):
        state, action, reward, next_state, isOver = inputs
        self.predict_value = self._get_DQN_prediction(state, is_training)
        action_onehot = tf.one_hot(action, NUM_ACTIONS)
        pred_action_value = tf.reduce_sum(self.predict_value * action_onehot, 1)    #N,
        max_pred_reward = tf.reduce_mean(tf.reduce_max(
            self.predict_value, 1), name='predict_reward')
        add_moving_summary(max_pred_reward)
        self.greedy_choice = tf.argmax(self.predict_value, 1)   # N,

        with tf.variable_scope('target'):
            targetQ_predict_value = self._get_DQN_prediction(next_state, False)    # NxA

            # DQN
            #best_v = tf.reduce_max(targetQ_predict_value, 1)    # N,

            # Double-DQN
            predict_onehot = tf.one_hot(self.greedy_choice, NUM_ACTIONS, 1.0, 0.0)
            best_v = tf.reduce_sum(targetQ_predict_value * predict_onehot, 1)

            target = reward + (1.0 - tf.cast(isOver, tf.float32)) * GAMMA * tf.stop_gradient(best_v)

        sqrcost = tf.square(target - pred_action_value)
        abscost = tf.abs(target - pred_action_value)    # robust error func
        cost = tf.select(abscost < 1, sqrcost, abscost)
        summary.add_param_summary([('conv.*/W', ['histogram', 'rms']),
                                   ('fc.*/W', ['histogram', 'rms']) ])   # monitor all W
        self.cost = tf.reduce_mean(cost, name='cost')
Code example #10
def char_cnn_model(features, target):
    """Character level convolutional neural network model to predict classes."""
    target = tf.one_hot(target, 15, 1, 0)
    byte_list = tf.reshape(tf.one_hot(features, 256, 1, 0), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
    with tf.variable_scope("CNN_Layer1"):
        # Apply Convolution filtering on input sequence.
        conv1 = tf.contrib.layers.convolution2d(byte_list, N_FILTERS, FILTER_SHAPE1, padding="VALID")
        # Add a RELU for non linearity.
        conv1 = tf.nn.relu(conv1)
        # Max pooling across output of Convolution+Relu.
        pool1 = tf.nn.max_pool(
            conv1, ksize=[1, POOLING_WINDOW, 1, 1], strides=[1, POOLING_STRIDE, 1, 1], padding="SAME"
        )
        # Transpose matrix so that n_filters from convolution becomes width.
        pool1 = tf.transpose(pool1, [0, 1, 3, 2])
    with tf.variable_scope("CNN_Layer2"):
        # Second level of convolution filtering.
        conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS, FILTER_SHAPE2, padding="VALID")
        # Max across each filter to get useful features for classification.
        pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])

    # Apply regular WX + B and classification.
    logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), optimizer="Adam", learning_rate=0.01
    )

    return ({"class": tf.argmax(logits, 1), "prob": tf.nn.softmax(logits)}, loss, train_op)
Code example #11
def char_rnn_model(features, labels, mode):
  """Character level recurrent neural network model to predict classes."""
  byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
  byte_list = tf.unstack(byte_vectors, axis=1)

  cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
  _, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)

  logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)

  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions={
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        })

  onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
  loss = tf.losses.softmax_cross_entropy(
      onehot_labels=onehot_labels, logits=logits)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
Code example #12
def char_cnn_model(features, labels, mode):
  """Character level convolutional neural network model to predict classes."""
  features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
  input_layer = tf.reshape(
      features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
  with tf.variable_scope('CNN_Layer1'):
    # Apply Convolution filtering on input sequence.
    conv1 = tf.layers.conv2d(
        input_layer,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE1,
        padding='VALID',
        # Add a ReLU for non linearity.
        activation=tf.nn.relu)
    # Max pooling across output of Convolution+Relu.
    pool1 = tf.layers.max_pooling2d(
        conv1,
        pool_size=POOLING_WINDOW,
        strides=POOLING_STRIDE,
        padding='SAME')
    # Transpose matrix so that n_filters from convolution becomes width.
    pool1 = tf.transpose(pool1, [0, 1, 3, 2])
  with tf.variable_scope('CNN_Layer2'):
    # Second level of convolution filtering.
    conv2 = tf.layers.conv2d(
        pool1,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE2,
        padding='VALID')
    # Max across each filter to get useful features for classification.
    pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])

  # Apply regular WX + B and classification.
  logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)

  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions={
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        })

  onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
  loss = tf.losses.softmax_cross_entropy(
      onehot_labels=onehot_labels, logits=logits)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
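A hedged sketch of how a model_fn like the one above could be wired into an Estimator and trained; x_train / y_train and the hyperparameter values are assumptions, not part of the original snippet.

classifier = tf.estimator.Estimator(model_fn=char_cnn_model)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={CHARS_FEATURE: x_train},   # integer character ids, shape [num_examples, MAX_DOCUMENT_LENGTH]
    y=y_train,                    # integer class labels
    batch_size=128,
    num_epochs=None,
    shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)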
Code example #13
File: multitask_gp.py Project: 812864539/models
  def posterior_mean_and_sample(self, candidates):
    """Draw samples for test predictions.

    Given a Tensor of 'candidates' inputs, returns samples from the posterior
    and the posterior mean prediction for those inputs.

    Args:
      candidates: A (num-examples x num-dims) Tensor containing the inputs for
      which to return predictions.
    Returns:
      y_mean: The posterior mean prediction given these inputs
      y_sample: A sample from the posterior of the outputs given these inputs
    """
    # Cross-covariance for test predictions
    w = tf.identity(self.weights_train)
    inds = tf.squeeze(
        tf.reshape(
            tf.tile(
                tf.reshape(tf.range(self.n_out), (self.n_out, 1)),
                (1, tf.shape(candidates)[0])), (-1, 1)))

    cross_cov = self.cov(tf.tile(candidates, [self.n_out, 1]), self.x_train)
    cross_task_cov = self.task_cov(tf.one_hot(inds, self.n_out), w)
    cross_cov *= cross_task_cov

    # Test mean prediction
    y_mean = tf.matmul(cross_cov, tf.matmul(self.input_inv, self.y_train))

    # Test sample predictions
    # Note this can be done much more efficiently using Kronecker products
    # if all tasks are fully observed (which we won't assume)
    test_cov = (
        self.cov(tf.tile(candidates, [self.n_out, 1]),
                 tf.tile(candidates, [self.n_out, 1])) *
        self.task_cov(tf.one_hot(inds, self.n_out),
                      tf.one_hot(inds, self.n_out)) -
        tf.matmul(cross_cov,
                  tf.matmul(self.input_inv,
                            tf.transpose(cross_cov))))

    # Get the matrix square root through an SVD for drawing samples
    # This seems more numerically stable than the Cholesky
    s, _, v = tf.svd(test_cov, full_matrices=True)
    test_sqrt = tf.matmul(v, tf.matmul(tf.diag(s), tf.transpose(v)))

    y_sample = (
        tf.matmul(
            test_sqrt,
            tf.random_normal([tf.shape(test_sqrt)[0], 1], dtype=tf.float64)) +
        y_mean)

    y_sample = (
        tf.transpose(tf.reshape(y_sample,
                                (self.n_out, -1))) * self.input_std +
        self.input_mean)

    return y_mean, y_sample
Code example #14
def one_hot_categorical_model(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    features = tf.one_hot(features, n_classes, 1.0, 0.0)
    prediction, loss = learn.models.logistic_regression(
      tf.squeeze(features, [1]), target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD',
        learning_rate=0.01)
    return tf.argmax(prediction, dimension=1), loss, train_op
Code example #15
    def __init__(self, env, env_name, _optimizer='adam'):
        """
        :param env:
        The output of this discriminator is the reward for the learning agent, not the cost,
        because the discriminator predicts P(expert|s,a) = 1 - P(agent|s,a).
        """
        self._optimizer = _optimizer
        env_header = env_name.split('-')[0]
        # CartPole-v1, Acrobot-v1, Pendulum-v0, HalfCheetah-v2, Hopper-v2, Walker2d-v2, Humanoid-v2
        if env_header == 'CartPole' or env_header == 'Acrobot' or env_header == 'Pendulum' or env_header == 'MountainCar':  # Classic control Gym
            action_space_count = env.action_space.n
        else: #Mujoco
            action_space_count = env.action_space.shape[0]

        with tf.variable_scope('discriminator'):
            self.scope = tf.get_variable_scope().name
            self.expert_s = tf.placeholder(dtype=tf.float32, shape=[None] + list(env.observation_space.shape))
            self.expert_a = tf.placeholder(dtype=tf.int32, shape=[None])
            expert_a_one_hot = tf.one_hot(self.expert_a, depth=action_space_count)
            # add noise to stabilise training
            expert_a_one_hot += tf.random_normal(tf.shape(expert_a_one_hot), mean=0.2, stddev=0.1, dtype=tf.float32)/1.2
            expert_s_a = tf.concat([self.expert_s, expert_a_one_hot], axis=1)

            self.agent_s = tf.placeholder(dtype=tf.float32, shape=[None] + list(env.observation_space.shape))
            self.agent_a = tf.placeholder(dtype=tf.int32, shape=[None])
            agent_a_one_hot = tf.one_hot(self.agent_a, depth=action_space_count)
            # add noise to stabilise training
            agent_a_one_hot += tf.random_normal(tf.shape(agent_a_one_hot), mean=0.2, stddev=0.1, dtype=tf.float32)/1.2
            agent_s_a = tf.concat([self.agent_s, agent_a_one_hot], axis=1)

            with tf.variable_scope('network') as network_scope:
                prob_1 = self.construct_network(input=expert_s_a)
                network_scope.reuse_variables()  # share parameter
                prob_2 = self.construct_network(input=agent_s_a)

            with tf.variable_scope('loss'):
                loss_expert = tf.reduce_mean(tf.log(tf.clip_by_value(prob_1, 0.01, 1)))
                loss_agent = tf.reduce_mean(tf.log(tf.clip_by_value(1 - prob_2, 0.01, 1)))
                loss = loss_expert + loss_agent
                loss = -loss
                tf.summary.scalar('discriminator', loss)

            # optimizer: adagrad, rmsprop, adadelta, adam, cocob
            if self._optimizer == 'adagrad':
                optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)  # initial_accumulator_value=0.1
            elif self._optimizer == 'rmsprop':
                optimizer = tf.train.RMSPropOptimizer(learning_rate=0.00025)  # decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, centered=False
            elif self._optimizer == 'adadelta':
                optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.5)  # learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False
            elif self._optimizer == 'cocob':
                optimizer = cocob.COCOB()
            else:  # adam
                optimizer = tf.train.AdamOptimizer()  # lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False
            self.train_op = optimizer.minimize(loss)

            self.rewards = tf.log(tf.clip_by_value(prob_2, 1e-10, 1))  # log(P(expert|s,a)) larger is better for agent
Code example #16
File: tfutil.py Project: NoahDStein/NeuralNetSandbox
def crappy_plot(val, levels):
    x_len = val.get_shape().as_list()[1]
    left_val = tf.concat(1, (val[:, 0:1], val[:, 0:x_len - 1]))
    right_val = tf.concat(1, (val[:, 1:], val[:, x_len - 1:]))

    left_mean = (val + left_val) // 2
    right_mean = (val + right_val) // 2
    low_val = tf.minimum(tf.minimum(left_mean, right_mean), val)
    high_val = tf.maximum(tf.maximum(left_mean, right_mean), val + 1)
    return tf.cumsum(tf.one_hot(low_val, levels, axis=1) - tf.one_hot(high_val, levels, axis=1), axis=1)
Code example #17
def nearest_neighbor(x,
                     means,
                     block_v_size,
                     random_top_k=1,
                     soft_em=False,
                     num_samples=1):
  """Find the nearest element in means to elements in x.

  Args:
    x: Batch of encoder continuous latent states sliced/projected into shape
      [-1, num_blocks, block_dim].
    means: Embedding table of shape [num_blocks, block_v_size, block_dim].
    block_v_size: Number of table entries per block.
    random_top_k: Noisy top-k if this is bigger than 1 (Default: 1).
    soft_em: If True then use soft EM rather than hard EM (Default: False).
    num_samples: Number of samples to take in soft EM (Default: 1).

  Returns:
    Tensor with nearest element in mean encoded in one-hot notation
    and distances.
  """
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)
  scalar_prod = tf.matmul(
      tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
  scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
  dist = x_norm_sq + tf.transpose(
      means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod

  # computing cluster probabilities
  if soft_em:
    num_blocks = common_layers.shape_list(dist)[1]
    nearest_idx = tf.stack(
        [
            tf.multinomial(-dist[:, i, :], num_samples=num_samples)
            for i in range(num_blocks)
        ],
        axis=1)
    nearest_hot = tf.one_hot(nearest_idx, depth=block_v_size)
    nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
  else:
    if random_top_k > 1:
      _, top_k_idx = tf.nn.top_k(-dist, k=random_top_k)
      nearest_idx = tf.gather(
          top_k_idx,
          tf.random_uniform(
              [1], minval=0, maxval=random_top_k - 1, dtype=tf.int32),
          axis=-1)
    else:
      nearest_idx = tf.argmax(-dist, axis=-1)
    nearest_hot = tf.one_hot(nearest_idx, block_v_size)
  return nearest_hot
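The dist tensor above relies on the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2<x, m> instead of forming x - m directly. A quick NumPy sanity check of that identity (shapes simplified, not the per-block layout used in the function):

import numpy as np

x = np.random.randn(4, 3)                       # 4 points of dimension 3
m = np.random.randn(5, 3)                       # 5 centroids of dimension 3
expanded = (x**2).sum(1, keepdims=True) + (m**2).sum(1) - 2 * x @ m.T
direct = ((x[:, None, :] - m[None, :, :])**2).sum(-1)
assert np.allclose(expanded, direct)            # both are the 4x5 squared-distance matrix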
Code example #18
File: one_hot_op_test.py Project: 0-T-0/tensorflow
 def _testOneHot(self, truth, use_gpu=False, expected_err_re=None, 
                 raises=None, **inputs):
   with self.test_session(use_gpu=use_gpu):
     if raises is not None:
       with self.assertRaises(raises):
         tf.one_hot(**inputs)
     else:
       ans = tf.one_hot(**inputs)
       if expected_err_re is None:
         tf_ans = ans.eval()
         self.assertAllClose(tf_ans, truth, atol=1e-10)
         self.assertEqual(tf_ans.shape, ans.get_shape())
       else:
         with self.assertRaisesOpError(expected_err_re):
           ans.eval()
Code example #19
File: network.py Project: JoyDoSun/tf-a3c-gpu
    def __init__(self, nA,
                 learning_rate,decay,grad_clip,entropy_beta,
                 state_shape=[84,84,4],
                 master=None, device_name='/gpu:0', scope_name='master'):
        with tf.device(device_name) :
            self.state = tf.placeholder(tf.float32,[None]+state_shape)
            block, self.scope  = ActorCritic._build_shared_block(self.state,scope_name)
            self.policy, self.log_softmax_policy = ActorCritic._build_policy(block,nA,scope_name)
            self.value = ActorCritic._build_value(block,scope_name)

            self.train_vars = sorted(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope.name), key=lambda v:v.name)
            if( master is not None ) :
                self.sync_op= self._sync_op(master)
                self.action = tf.placeholder(tf.int32,[None,])
                self.target_value = tf.placeholder(tf.float32,[None,])

                advantage = self.target_value - self.value
                entropy = tf.reduce_sum(-1. * self.policy * self.log_softmax_policy,axis=1)
                log_p_s_a = tf.reduce_sum(self.log_softmax_policy * tf.one_hot(self.action,nA),axis=1)

                self.policy_loss = tf.reduce_mean(tf.stop_gradient(advantage)*log_p_s_a)
                self.entropy_loss = tf.reduce_mean(entropy)
                self.value_loss = tf.reduce_mean(advantage**2)

                loss = -self.policy_loss - entropy_beta* self.entropy_loss + self.value_loss
                self.gradients = tf.gradients(loss,self.train_vars)
                clipped_gs = [tf.clip_by_average_norm(g,grad_clip) for g in self.gradients]
                self.train_op = master.optimizer.apply_gradients(zip(clipped_gs,master.train_vars))
            else :
                #self.optimizer = tf.train.AdamOptimizer(learning_rate,beta1=BETA)
                self.optimizer = tf.train.RMSPropOptimizer(learning_rate,decay=decay,use_locking=True)
Code example #20
def rnn_model(features, target):
  """RNN model to predict from sequence of words to a class."""
  # Convert indexes of words into embeddings.
  # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
  # maps word indexes of the sequence into [batch_size, sequence_length,
  # EMBEDDING_SIZE].
  word_vectors = tf.contrib.layers.embed_sequence(
      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')

  # Split into list of embedding per word, while removing doc length dim.
  # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
  word_list = tf.unstack(word_vectors, axis=1)

  # Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
  cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)

  # Create an unrolled Recurrent Neural Networks to length of
  # MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
  _, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)

  # Given encoding of RNN, take encoding of last step (e.g hidden size of the
  # neural network of last step) and pass it as features for logistic
  # regression over output classes.
  target = tf.one_hot(target, 15, 1, 0)
  logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

  # Create a training op.
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(),
      optimizer='Adam', learning_rate=0.01)

  return (
      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
      loss, train_op)
Code example #21
File: ce.py Project: aaiijmrtt/DEEPSPEECH
def ce(model, config, scope, connect, threshold = 1e-5):
	with tf.variable_scope(scope), tf.name_scope(scope):
		with tf.variable_scope('inputs'), tf.name_scope('inputs'):
			model['%s_in0length' %scope] = model['%s_out0length' %connect]
			model['%s_in1length' %scope] = model['%s_out1length' %connect]
			model['%s_in2length' %scope] = model['%s_out2length' %connect]
			model['%s_maxin2length' %scope] = model['%s_maxout2length' %connect]
			model['%s_inputs' %scope] = tf.clip_by_value(tf.nn.softmax(model['%s_outputs' %connect]), threshold, 1. - threshold, name = '%s_inputs' %scope)
			model['%s_out0length' %scope] = model['%s_in0length' %scope]
			model['%s_out1length' %scope] = model['%s_in1length' %scope]
			model['%s_out2length' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_out2length' %scope)
			model['%s_maxout2length' %scope] = model['%s_maxin2length' %scope]

		with tf.variable_scope('labels'), tf.name_scope('labels'):
			model['%s_labels_len' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_labels_len' %scope)
			model['%s_labels_ind' %scope] = tf.placeholder(tf.int64, [None, 2], '%s_labels_ind' %scope)
			model['%s_labels_val' %scope] = tf.placeholder(tf.int32, [None], '%s_labels_val' %scope)
			model['%s_labels_collapsed' %scope] = tf.sparse_to_dense(model['%s_labels_ind' %scope], [model['%s_maxin2length' %scope], model['%s_in0length' %scope]], model['%s_labels_val' %scope], -1, name = '%s_labels_collapsed' %scope)
			model['%s_labels' %scope] = tf.one_hot(model['%s_labels_collapsed' %scope], model['%s_out1length' %scope], name = '%s_labels' %scope)

		with tf.variable_scope('loss'), tf.name_scope('loss'):
			model['%s_loss' %scope] = tf.reduce_sum(-tf.multiply(model['%s_labels' %scope], tf.log(model['%s_inputs' %scope])), name = '%s_loss' %scope)

		with tf.variable_scope('outputs'), tf.name_scope('outputs'):
			model['%s_output' %scope] = model['%s_inputs' %scope]

	return model
Code example #22
def record_parser_fn(value, is_training):
    """Parse an image record from `value`."""
    keys_to_features = {
          'width': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
          'height': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
          'image': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
          'label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
          'name': tf.FixedLenFeature([], dtype=tf.string, default_value='')
    }

    parsed = tf.parse_single_example(value, keys_to_features)

    image = tf.image.decode_image(tf.reshape(parsed['image'], shape=[]),
      FLAGS.image_channels)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    
    bbox = tf.concat(axis=0, values=[ [[]], [[]], [[]], [[]] ])
    bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
    image = image_preprocess.preprocess_image(
        image=image,
        output_height=FLAGS.image_size,
        output_width=FLAGS.image_size,
        object_cover=0.0, 
        area_cover=0.05,
        is_training=is_training,
        bbox=bbox)

    label = tf.cast(tf.reshape(parsed['label'], shape=[]),dtype=tf.int32)
    label = tf.one_hot(label, FLAGS.class_num)    

    return image, label
Code example #23
def sample_with_temperature(logits, temperature):
  """Either argmax after softmax or random sample along the pitch axis.

  Args:
    logits: a Tensor of shape (batch, time, pitch, instrument).
    temperature: a float; 0.0 means argmax, 1.0 means sampling from the softmax.

  Returns:
    a Tensor of the same shape, with one_hots on the pitch dimension.
  """
  logits = tf.transpose(logits, [0, 1, 3, 2])
  pitch_range = tf.shape(logits)[-1]

  def sample_from_logits(logits):
    with tf.control_dependencies([tf.assert_greater(temperature, 0.0)]):
      logits = tf.identity(logits)
    reshaped_logits = (
        tf.reshape(logits, [-1, tf.shape(logits)[-1]]) / temperature)
    choices = tf.multinomial(reshaped_logits, 1)
    choices = tf.reshape(choices,
                         tf.shape(logits)[:logits.get_shape().ndims - 1])
    return choices

  choices = tf.cond(tf.equal(temperature, 0.0),
                    lambda: tf.argmax(tf.nn.softmax(logits), -1),
                    lambda: sample_from_logits(logits))
  samples_onehot = tf.one_hot(choices, pitch_range)
  return tf.transpose(samples_onehot, [0, 1, 3, 2])
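Dividing the logits by the temperature before the softmax/multinomial sharpens the distribution for temperatures below 1 and flattens it for temperatures above 1, with the limit at 0 approaching the argmax branch. A small, self-contained NumPy illustration (not part of the original code):

import numpy as np

def softmax(z):
    z = z - z.max()
    e = np.exp(z)
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.1])
for temperature in (0.25, 1.0, 4.0):
    print(temperature, softmax(logits / temperature).round(3))
# 0.25 -> nearly one-hot on the largest logit; 4.0 -> close to uniform.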
Code example #24
 def accuracy(self):
     if self._accuracy is None:
         with tf.variable_scope('accuracy'):
             correct_predictions = tf.equal(tf.argmax(self.inference, axis=1),
                                            tf.argmax(tf.one_hot(self.targets, depth=self.n_classes), axis=1))
             self._accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
     return self._accuracy
Code example #25
def parse_record(raw_record):
  """Parse CIFAR-10 image and label from a raw record."""
  # Every record consists of a label followed by the image, with a fixed number
  # of bytes for each.
  label_bytes = 1
  image_bytes = _HEIGHT * _WIDTH * _DEPTH
  record_bytes = label_bytes + image_bytes

  # Convert bytes to a vector of uint8 that is record_bytes long.
  record_vector = tf.decode_raw(raw_record, tf.uint8)

  # The first byte represents the label, which we convert from uint8 to int32
  # and then to one-hot.
  label = tf.cast(record_vector[0], tf.int32)
  label = tf.one_hot(label, _NUM_CLASSES)

  # The remaining bytes after the label represent the image, which we reshape
  # from [depth * height * width] to [depth, height, width].
  depth_major = tf.reshape(
      record_vector[label_bytes:record_bytes], [_DEPTH, _HEIGHT, _WIDTH])

  # Convert from [depth, height, width] to [height, width, depth], and cast as
  # float32.
  image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)

  return image, label
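A hedged sketch of how parse_record might be plugged into an input pipeline; the surrounding code is not shown in the snippet, and the file name, batch size, and CIFAR-10 constants (_HEIGHT = _WIDTH = 32, _DEPTH = 3) are assumptions.

_HEIGHT, _WIDTH, _DEPTH, _NUM_CLASSES = 32, 32, 3, 10
record_bytes = 1 + _HEIGHT * _WIDTH * _DEPTH    # one label byte plus the image bytes

dataset = tf.data.FixedLengthRecordDataset(['data_batch_1.bin'], record_bytes)
dataset = dataset.map(parse_record).batch(128)
images, labels = dataset.make_one_shot_iterator().get_next()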
Code example #26
def discretize_uniform(x, levels, thermometer=False):
  """Discretize input into levels using uniformly distributed buckets.

  Args:
    x: Input tensor to discretize, assumed to be between (0, 1).
    levels: Number of levels to discretize into.
    thermometer: Whether to encode the discretized tensor in thermometer encoding
        (Default: False).


  Returns:
    Discretized version of x of shape [-1, height, width, channels * levels].
  """
  clipped_x = tf.clip_by_value(x, 0., 1.)
  int_x = tf.to_int32((.99999 * clipped_x) * levels)
  one_hot = tf.one_hot(
      int_x, depth=levels, on_value=1., off_value=0., dtype=tf.float32, axis=-1)

  # Check to see if we are encoding in thermometer
  discretized_x = one_hot
  if thermometer:
    discretized_x = one_hot_to_thermometer(one_hot, levels, flattened=False)

  # Reshape x to [-1, height, width, channels * levels]
  discretized_x = flatten_last(discretized_x)
  return discretized_x
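A worked example of the bucketing rule above with levels = 4 (illustrative values only):

import numpy as np

levels = 4
for x in (0.10, 0.60, 1.00):
    bucket = int(0.99999 * x * levels)          # same rule as the tf.to_int32 line above
    print(x, bucket, np.eye(levels, dtype=int)[bucket])
# 0.1 -> bucket 0 -> [1 0 0 0]; 0.6 -> bucket 2 -> [0 0 1 0];
# 1.0 -> bucket 3 -> [0 0 0 1]  (the 0.99999 factor keeps x = 1.0 inside the last bucket)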
Code example #27
 def loss(self):
     if self._loss is None:
         with tf.variable_scope('loss'):
             predictions = self.inference
             targets_onehot = tf.one_hot(self.targets, depth=self.n_classes)
             self._loss = tf.reduce_mean(-tf.reduce_sum(targets_onehot * tf.log(predictions + EPS), reduction_indices=1))
     return self._loss
Code example #28
def discretize_centroids(x, levels, centroids, thermometer=False):
  """Discretize input into levels using custom centroids.

  Args:
    x: Input tensor to discretize, assumed to be between (0, 1).
    levels: Number of levels to discretize into.
    centroids: Custom centroids into which the input is to be discretized.
    thermometer: Whether to encode the discretized tensor in thermometer encoding
        (Default: False).

  Returns:
    Discretized version of x of shape [-1, height, width, channels * levels]
    using supplied centroids.
  """
  x_stacked = tf.stack(levels * [x], axis=-1)
  dist = tf.to_float(tf.squared_difference(x_stacked, centroids))
  idx = tf.argmin(dist, axis=-1)
  one_hot = tf.one_hot(idx, depth=levels, on_value=1., off_value=0.)

  # Check to see if we are encoding in thermometer
  discretized_x = one_hot
  if thermometer:
    discretized_x = one_hot_to_thermometer(one_hot, levels, flattened=False)

  # Reshape x to [-1, height, width, channels * levels]
  discretized_x = flatten_last(discretized_x)
  return discretized_x
Code example #29
def thermometer_to_one_hot(x, levels, flattened=False):
  """Convert thermometer to one hot code.

  Args:
    x: Input tensor in thermometer encoding to convert to one-hot. Input is
        assumed to be
        of shape [-1, height, width, channels, levels].
    levels: Number of levels the input has been discretized into.
    flattened: True if x is of the form [-1, height, width, channels * levels]
        else it is of shape [-1, height, width, channels, levels].
        (Default: False).

  Returns:
    One hot encoding of x.
  """
  # Unflatten if flattened, so that x has shape
  # [-1, height, width, channels, levels]
  if flattened:
    x = unflatten_last(x, levels)
  int_x = tf.to_int32(tf.reduce_sum(x, axis=-1)) - 1
  one_hot = tf.one_hot(
      int_x, depth=levels, on_value=1., off_value=0., dtype=tf.float32, axis=-1)
  # Flatten back if input was flattened
  if flattened:
    one_hot = flatten_last(one_hot)
  return one_hot
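The inverse helper one_hot_to_thermometer is referenced by the discretize functions above but not shown. A hedged sketch, assuming the same flatten_last / unflatten_last helpers: a thermometer code for level k has ones in positions 0..k, which is the reverse cumulative sum of the one-hot code, so the exact signature below is an assumption.

def one_hot_to_thermometer(one_hot, levels, flattened=False):
  """Sketch only; the real helper's signature may differ."""
  if flattened:
    one_hot = unflatten_last(one_hot, levels)
  # Ones at and below the hot position, e.g. [0, 0, 1, 0] -> [1, 1, 1, 0].
  thermometer = tf.cumsum(one_hot, axis=-1, reverse=True)
  if flattened:
    thermometer = flatten_last(thermometer)
  return thermometer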
Code example #30
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the i-th class number and the j-th
    column corresponds to the j-th training example, so if example j has label i, then
    entry (i, j) will be 1.
                     
    Arguments:
    labels -- vector containing the labels 
    C -- number of classes, the depth of the one hot dimension
    
    Returns: 
    one_hot -- one hot matrix
    """
    
    
    # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)
    C = tf.constant(C, name = "C")
    
    # Use tf.one_hot, be careful with the axis (approx. 1 line)
    one_hot_matrix = tf.one_hot(labels, C)
    
    # Create the session (approx. 1 line)
    sess = tf.Session()
    
    # Run the session (approx. 1 line)
    one_hot = sess.run(one_hot_matrix).T
    
    # Close the session (approx. 1 line). See method 1 above.
    sess.close()
    
    
    return one_hot
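Example usage (values shown for illustration): with labels [1, 2, 3, 0, 2, 1] and C = 4, the transposed result has one row per class and one column per example.

print(one_hot_matrix([1, 2, 3, 0, 2, 1], C=4))
# [[0. 0. 0. 1. 0. 0.]
#  [1. 0. 0. 0. 0. 1.]
#  [0. 1. 0. 0. 1. 0.]
#  [0. 0. 1. 0. 0. 0.]]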
Code example #31
def add_count(img, label, max_count=0):
    return img, (label, tf.one_hot(tf.math.reduce_sum(label, axis=1),
                                   max_count))
Code example #32
File: vectorCapsNet.py Project: jkronen/CapsLayer
    def __init__(self, height=28, width=28, channels=1, num_label=1):
        '''
        Args:
            height: ...
            width: ...
            channels: ...
        '''
        self.height = height
        self.width = width
        self.channels = channels
        self.num_label = num_label

        self.graph = tf.Graph()
        with self.graph.as_default():
            if cfg.is_training:
                self.X, self.labels = get_batch_data(cfg.dataset,
                                                     cfg.batch_size,
                                                     cfg.num_threads)
                self.x = tf.reshape(self.X,
                                    shape=[
                                        cfg.batch_size, self.height,
                                        self.width, self.channels
                                    ])
                self.Y = tf.one_hot(self.labels,
                                    depth=self.num_label,
                                    axis=1,
                                    dtype=tf.float32)

                self.build_arch()
                self.loss()
                self._summary()

                self.global_step = tf.Variable(1,
                                               name='global_step',
                                               trainable=False)
                self.optimizer = tf.train.AdamOptimizer()
                self.train_op = self.optimizer.minimize(
                    self.loss, global_step=self.global_step)
            else:
                self.X = tf.placeholder(tf.float32,
                                        shape=(cfg.batch_size, None))
                self.x = tf.reshape(self.X,
                                    shape=[
                                        cfg.batch_size, self.height,
                                        self.width, self.channels
                                    ])
                self.labels = tf.placeholder(tf.int32,
                                             shape=(cfg.batch_size, ))
                self.Y = tf.one_hot(self.labels,
                                    depth=self.num_label,
                                    axis=1,
                                    dtype=tf.float32)
                self.build_arch()

            with tf.variable_scope('accuracy'):
                logits_idx = tf.to_int32(
                    tf.argmax(softmax(self.activation, axis=1), axis=1))
                correct_prediction = tf.equal(tf.to_int32(self.labels),
                                              logits_idx)
                self.accuracy = tf.reduce_sum(
                    tf.cast(correct_prediction, tf.float32))
                self.test_acc = tf.placeholder_with_default(tf.constant(0.),
                                                            shape=[])
Code example #33
def binarize(img, label):
    return img, tf.one_hot(tf.cast(label[0], tf.uint8),
                           2,
                           on_value=0,
                           off_value=1)
Code example #34
File: main_fix.py Project: koso019003/AM-FM-PM
    # In the code we define three different types of inputs.
    # * Training inputs (the stories we downloaded) (batch_size > 1, with unrolling)
    # * Validation inputs (an unseen validation dataset) (batch_size = 1, no unrolling)
    # * Test inputs (the new story we are going to generate) (batch_size = 1, no unrolling)

    tf.reset_default_graph()

    # Training Input data.
    train_inputs, train_labels, train_masks = [], [], []
    train_labels_ohe = []
    # Defining unrolled training inputs
    for ui in range(FLAGS.seq_len):
        train_inputs.append(tf.placeholder(tf.int32, shape=[FLAGS.batch_size], name='train_inputs_%d' % ui))
        train_labels.append(tf.placeholder(tf.int32, shape=[FLAGS.batch_size], name='train_labels_%d' % ui))
        train_masks.append(tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name='train_masks_%d' % ui))
        train_labels_ohe.append(tf.one_hot(train_labels[ui], vocabulary_size))

    # Validation data placeholders
    valid_inputs = tf.placeholder(tf.int32, shape=[1], name='valid_inputs')
    valid_labels = tf.placeholder(tf.int32, shape=[1], name='valid_labels')
    valid_labels_ohe = tf.one_hot(valid_labels, vocabulary_size)

    # ## Loading Word Embeddings to TensorFlow
    # We load the previously learned and stored embeddings to TensorFlow and define tensors to hold embeddings
    embed_mat = np.load(embedding_name)
    embeddings_size = embed_mat.shape[1]
    embed_init = tf.constant(embed_mat)
    embeddings = tf.Variable(embed_init, name='embeddings')

    # Defining embedding lookup operations for all the unrolled
    # training inputs
Code example #35
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.
  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.
  Returns:
    float tensor with same shape as `input_tensor`.
  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]

  output = input_tensor

  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

  if use_position_embeddings:
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())

      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings

  output = layer_norm_and_dropout(output, dropout_prob)
  return output
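The one-hot matmul used for the token-type embeddings above is equivalent to a gather; the code comment notes that the matmul form is faster for a small vocabulary. A tiny illustration of the equivalence (toy values, not part of the original code):

ids = tf.constant([2, 0, 1])
table = tf.constant([[0., 1.], [2., 3.], [4., 5.]])
via_matmul = tf.matmul(tf.one_hot(ids, depth=3), table)
via_gather = tf.gather(table, ids)   # same values; gather is preferred for large vocabularies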
Code example #36
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  if FLAGS.input_file_processor == "run_classifier":
    processors = {
        "sst-2": rc.SST2Processor,
        "mnli": rc.MnliProcessor,
    }
  elif FLAGS.input_file_processor == "run_classifier_distillation":
    processors = {
        "sst-2": rc.SST2ProcessorDistillation,
        "mnli": rc.MNLIProcessorDistillation,
    }
  else:
    raise ValueError("Invalid --input_file_processor flag value")

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  task_name = FLAGS.task_name.lower()
  processor = processors[task_name]()
  label_list = processor.get_labels()
  num_labels = len(label_list)

  input_ids_placeholder = tf.placeholder(
      dtype=tf.int32, shape=[None, FLAGS.max_seq_length])

  bert_input_mask_placeholder = tf.placeholder(
      dtype=tf.int32, shape=[None, FLAGS.max_seq_length])

  token_type_ids_placeholder = tf.placeholder(
      dtype=tf.int32, shape=[None, FLAGS.max_seq_length])

  prob_vector_placeholder = tf.placeholder(
      dtype=tf.float32, shape=[None, num_labels])

  one_hot_input_ids = tf.one_hot(
      input_ids_placeholder, depth=bert_config.vocab_size)

  input_tensor, _ = em_util.run_one_hot_embeddings(
      one_hot_input_ids=one_hot_input_ids, config=bert_config)

  flex_input_obj, per_eg_obj, probs = em_util.model_fn(
      input_tensor=input_tensor,
      bert_input_mask=bert_input_mask_placeholder,
      token_type_ids=token_type_ids_placeholder,
      bert_config=bert_config,
      num_labels=num_labels,
      obj_type=FLAGS.obj_type,
      prob_vector=prob_vector_placeholder)

  if FLAGS.obj_type.startswith("min"):
    final_obj = -1 * flex_input_obj
  elif FLAGS.obj_type.startswith("max"):
    final_obj = flex_input_obj

  # Calculate the gradient of the final loss function with respect to
  # the one-hot input space
  grad_obj_one_hot = tf.gradients(ys=final_obj, xs=one_hot_input_ids)[0]

  # gradients with respect to position in one hot input space with 1s in it
  # this is one term in the directional derivative of HotFlip,
  # Eq1 in https://arxiv.org/pdf/1712.06751.pdf
  #
  # grad_obj_one_hot.shape = [batch_size, seq_length, vocab_size]
  # input_ids_placeholder.shape = [batch_size, seq_length]
  # original_token_gradients.shape = [batch_size, seq_length]
  original_token_gradients = tf.gather(
      params=grad_obj_one_hot,
      indices=tf.expand_dims(input_ids_placeholder, -1),
      batch_dims=2)
  original_token_gradients = tf.tile(
      original_token_gradients, multiples=[1, 1, FLAGS.beam_size])

  # These are the gradients / indices whose one-hot position has the largest
  # gradient magnitude, the performs part of the max calculation in Eq10 of
  # https://arxiv.org/pdf/1712.06751.pdf
  biggest_gradients, biggest_indices = tf.nn.top_k(
      input=grad_obj_one_hot, k=FLAGS.beam_size)

  # Eq10 of https://arxiv.org/pdf/1712.06751.pdf
  grad_difference = biggest_gradients - original_token_gradients

  tvars = tf.trainable_variables()

  assignment_map, _ = modeling.get_assignment_map_from_checkpoint(
      tvars, FLAGS.init_checkpoint)

  tf.logging.info("Variables mapped = %d / %d", len(assignment_map), len(tvars))

  tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)

  sess = tf.Session()
  sess.run(tf.global_variables_initializer())

  if FLAGS.input_file:
    custom_examples = processor.get_custom_examples(FLAGS.input_file)
    custom_templates = [
        em_util.input_to_template(x, label_list) for x in custom_examples
    ]
  else:
    prob_vector = [float(x) for x in FLAGS.prob_vector.split(",")]
    custom_templates = [(FLAGS.input_template, prob_vector)]

  num_input_sequences = custom_templates[0][0].count("[SEP]")

  if FLAGS.flipping_mode == "beam_search":
    FLAGS.batch_size = 1

  detok_partial = functools.partial(em_util.detokenize, tokenizer=tokenizer)

  # Since input files will often be quite large, this flag allows processing
  # only a slice of the input file
  if FLAGS.input_file_range:
    start_index, end_index = FLAGS.input_file_range.split("-")
    if start_index == "start":
      start_index = 0
    if end_index == "end":
      end_index = len(custom_templates)
    start_index, end_index = int(start_index), int(end_index)
  else:
    start_index = 0
    end_index = len(custom_templates)

  tf.logging.info("Processing examples in range %d, %d", start_index, end_index)

  all_elements = []

  too_long = 0

  for ip_num, (ip_template, prob_vector) in enumerate(
      custom_templates[start_index:end_index]):
    # Parse the input template into a list of IDs and the corresponding mask.
    # Different segments in template are separated by " <piece> "
    # Each segment is associated with a word piece (or [EMPTY] to get flex
    # inputs) and a frequency, separated by "<freq>". "*" can be used as the
    # frequency to fill the template up to the end of the sequence
    #
    # Here is an example 2-sequence template for tasks like MNLI to optimize
    # 20 vectors, (10 for each sequence)
    # [CLS]<freq>1 <piece> [EMPTY]<freq>10 <piece> [SEP]<freq>1 <piece> \
    # [EMPTY]<freq>10 <piece> [SEP]<freq>1 <piece> [PAD]<freq>*
    (input_ids, input_mask, bert_input_mask,
     token_type_ids) = em_util.template_to_ids(
         template=ip_template,
         config=bert_config,
         tokenizer=tokenizer,
         max_seq_length=FLAGS.max_seq_length)

    if len(input_ids) > FLAGS.max_seq_length:
      # truncate them!
      input_ids = input_ids[:FLAGS.max_seq_length]
      input_mask = input_mask[:FLAGS.max_seq_length]
      bert_input_mask = bert_input_mask[:FLAGS.max_seq_length]
      token_type_ids = token_type_ids[:FLAGS.max_seq_length]
      too_long += 1

    all_elements.append({
        "input_ids": input_ids,
        "original_input_ids": [ii for ii in input_ids],
        "ip_num": start_index + ip_num,
        "score": 0.0,
        "bert_input_mask": bert_input_mask,
        "input_mask": input_mask,
        "token_type_ids": token_type_ids,
        "prob_vector": prob_vector,
        "stopped": False,
        "steps_taken": 0
    })

  tf.logging.info("%d / %d were too long and hence truncated.", too_long,
                  len(all_elements))

  iteration_number = 0
  consistent_output_sequences = []

  while all_elements and iteration_number < 10:

    steps_taken = []
    output_sequences = []
    failures = []
    zero_step_instances = 0

    iteration_number += 1
    tf.logging.info("Starting iteration number %d", iteration_number)
    tf.logging.info("Pending items = %d / %d", len(all_elements),
                    len(custom_templates[start_index:end_index]))

    batch_elements = []
    for ip_num, input_object in enumerate(all_elements):
      batch_elements.append(input_object)
      # wait until the batch has been filled up to the batch size
      if (len(batch_elements) < FLAGS.batch_size and
          ip_num < len(all_elements) - 1):
        continue

      # optimize a part of the flex_input (depending on the template)
      for step_num in range(FLAGS.total_steps):
        feed_dict = {
            input_ids_placeholder:
                np.array([x["input_ids"] for x in batch_elements]),
            bert_input_mask_placeholder:
                np.array([x["bert_input_mask"] for x in batch_elements]),
            token_type_ids_placeholder:
                np.array([x["token_type_ids"] for x in batch_elements]),
            prob_vector_placeholder:
                np.array([x["prob_vector"] for x in batch_elements])
        }

        if FLAGS.flipping_mode == "random":
          # Avoiding the gradient computation when the flipping mode is random
          peo, pr = sess.run([per_eg_obj, probs], feed_dict=feed_dict)
        else:
          peo, gd, bi, pr = sess.run(
              [per_eg_obj, grad_difference, biggest_indices, probs],
              feed_dict=feed_dict)

        if FLAGS.print_flips:
          output_log = "\n" + "\n".join([
              "Objective = %.4f, Score = %.4f, Element %d = %s" %
              (obj, elem["score"], kk, detok_partial(elem["input_ids"]))
              for kk, (obj, elem) in enumerate(zip(peo, batch_elements))
          ])
          tf.logging.info("Step = %d %s\n", step_num, output_log)

        should_stop = evaluate_stopping(
            stopping_criteria=FLAGS.stopping_criteria,
            obj_prob_vector=np.array([x["prob_vector"] for x in batch_elements
                                     ]),
            curr_prob_vector=pr,
            per_example_objective=peo)

        for elem, stop_bool in zip(batch_elements, should_stop):
          if stop_bool and (not elem["stopped"]):
            if step_num == 0:
              # don't actually stop the perturbation since we want a new input
              zero_step_instances += 1
            else:
              elem["stopped"] = True
              elem["steps_taken"] = step_num

        if np.all([elem["stopped"] for elem in batch_elements]):
          steps_taken.extend([elem["steps_taken"] for elem in batch_elements])
          output_sequences.extend([elem for elem in batch_elements])
          batch_elements = []
          break

        if step_num == FLAGS.total_steps - 1:
          failures.extend(
              [elem for elem in batch_elements if not elem["stopped"]])
          steps_taken.extend([
              elem["steps_taken"] for elem in batch_elements if elem["stopped"]
          ])
          output_sequences.extend(
              [elem for elem in batch_elements if elem["stopped"]])
          batch_elements = []
          break

        # Flip a token / word-piece either systematically or randomly
        # For instances where hotflip was not successful, do some random
        # perturbations before doing hotflip
        if (FLAGS.flipping_mode == "random" or
            (iteration_number > 1 and step_num < iteration_number)):
          for element in batch_elements:
            # don't perturb elements which have stopped
            if element["stopped"]:
              continue

            random_seq_index = np.random.choice([
                ii for ii, mask_id in enumerate(element["input_mask"])
                if mask_id > 0.5
            ])

            random_token_id = np.random.randint(len(tokenizer.vocab))
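            # resample until the id is not a special token such as [CLS], [SEP] or [PAD]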
            while (tokenizer.inv_vocab[random_token_id][0] == "[" and
                   tokenizer.inv_vocab[random_token_id][-1] == "]"):
              random_token_id = np.random.randint(len(tokenizer.vocab))

            element["input_ids"][random_seq_index] = random_token_id

        elif FLAGS.flipping_mode == "greedy":
          batch_elements = greedy_updates(
              old_elements=batch_elements,
              grad_difference=gd,
              biggest_indices=bi,
              max_seq_length=FLAGS.max_seq_length)

        elif FLAGS.flipping_mode == "beam_search":
          # only supported with a batch size of 1!
          batch_elements = beam_search(
              old_beams=batch_elements,
              grad_difference=gd,
              biggest_indices=bi,
              beam_size=FLAGS.beam_size,
              accumulate_scores=FLAGS.accumulate_scores,
              max_seq_length=FLAGS.max_seq_length)

        else:
          raise ValueError("Invalid --flipping_mode flag value")

      tf.logging.info("steps = %.4f (%d failed, %d non-zero, %d zero)",
                      np.mean([float(x) for x in steps_taken if x > 0]),
                      len(failures), len([x for x in steps_taken if x > 0]),
                      zero_step_instances)

    # measure consistency of final dataset - run a forward pass through the
    # entire final dataset and verify it satisfies the original objective. If
    # the code runs correctly, total_inconsistent = 0
    tf.logging.info("Measuring consistency of final dataset")

    total_inconsistent = 0
    total_lossy = 0

    for i in range(0, len(output_sequences), FLAGS.batch_size):
      batch_elements = output_sequences[i:i + FLAGS.batch_size]
      feed_dict = {
          input_ids_placeholder:
              np.array([x["input_ids"] for x in batch_elements]),
          bert_input_mask_placeholder:
              np.array([x["bert_input_mask"] for x in batch_elements]),
          token_type_ids_placeholder:
              np.array([x["token_type_ids"] for x in batch_elements]),
          prob_vector_placeholder:
              np.array([x["prob_vector"] for x in batch_elements])
      }
      peo, pr = sess.run([per_eg_obj, probs], feed_dict=feed_dict)
      consistency_flags = evaluate_stopping(
          stopping_criteria=FLAGS.stopping_criteria,
          obj_prob_vector=np.array([x["prob_vector"] for x in batch_elements]),
          curr_prob_vector=pr,
          per_example_objective=peo)
      total_inconsistent += len(batch_elements) - np.sum(consistency_flags)

      # Next, convert the input back to a string. This round trip is often
      # lossy since it eliminates impossible sequences and incorrect
      # tokenizations. We check how many of the consistencies still hold true
      all_detok_strings = [
          em_util.ids_to_strings(elem["input_ids"], tokenizer)
          for elem in batch_elements
      ]

      all_ip_examples = []
      if num_input_sequences == 1:
        for ds, be in zip(all_detok_strings, batch_elements):
          prob_vector_labels = be["prob_vector"].tolist()
          all_ip_examples.append(
              rc.InputExample(
                  text_a=ds[0],
                  text_b=None,
                  label=prob_vector_labels,
                  guid=None))
      else:
        for ds, be in zip(all_detok_strings, batch_elements):
          prob_vector_labels = be["prob_vector"].tolist()
          all_ip_examples.append(
              rc.InputExample(
                  text_a=ds[0],
                  text_b=ds[1],
                  label=prob_vector_labels,
                  guid=None))

      all_templates = [
          em_util.input_to_template(aie, label_list) for aie in all_ip_examples
      ]
      all_new_elements = []
      for ip_template, prob_vector in all_templates:
        (input_ids, input_mask, bert_input_mask,
         token_type_ids) = em_util.template_to_ids(
             template=ip_template,
             config=bert_config,
             tokenizer=tokenizer,
             max_seq_length=FLAGS.max_seq_length)

        if len(input_ids) > FLAGS.max_seq_length:
          input_ids = input_ids[:FLAGS.max_seq_length]
          input_mask = input_mask[:FLAGS.max_seq_length]
          bert_input_mask = bert_input_mask[:FLAGS.max_seq_length]
          token_type_ids = token_type_ids[:FLAGS.max_seq_length]

        all_new_elements.append({
            "input_ids": input_ids,
            "input_mask": input_mask,
            "bert_input_mask": bert_input_mask,
            "token_type_ids": token_type_ids,
            "prob_vector": prob_vector
        })
      feed_dict = {
          input_ids_placeholder:
              np.array([x["input_ids"] for x in all_new_elements]),
          bert_input_mask_placeholder:
              np.array([x["bert_input_mask"] for x in all_new_elements]),
          token_type_ids_placeholder:
              np.array([x["token_type_ids"] for x in all_new_elements]),
          prob_vector_placeholder:
              np.array([x["prob_vector"] for x in all_new_elements])
      }
      peo, pr = sess.run([per_eg_obj, probs], feed_dict=feed_dict)
      lossy_consistency_flags = evaluate_stopping(
          stopping_criteria=FLAGS.stopping_criteria,
          obj_prob_vector=np.array([x["prob_vector"] for x in all_new_elements
                                   ]),
          curr_prob_vector=pr,
          per_example_objective=peo)

      total_lossy += len(all_new_elements) - np.sum(lossy_consistency_flags)

      net_consistency_flags = np.logical_and(consistency_flags,
                                             lossy_consistency_flags)

      for elem, ncf in zip(batch_elements, net_consistency_flags):
        if ncf:
          consistent_output_sequences.append(elem)
        else:
          failures.append(elem)

    tf.logging.info("Total inconsistent found = %d / %d", total_inconsistent,
                    len(output_sequences))
    tf.logging.info("Total lossy inconsistent found = %d / %d", total_lossy,
                    len(output_sequences))
    tf.logging.info("Total consistent outputs so far = %d / %d",
                    len(consistent_output_sequences),
                    len(custom_templates[start_index:end_index]))

    # Getting ready for next iteration of processing
    if iteration_number < 10:
      for elem in failures:
        elem["input_ids"] = [x for x in elem["original_input_ids"]]
        elem["stopped"] = False
        elem["steps_taken"] = 0
        elem["score"] = 0.0
      all_elements = failures

  tf.logging.info("Giving up on %d instances!", len(failures))
  for elem in failures:
    consistent_output_sequences.append(elem)

  if FLAGS.output_file:
    final_output = []
    for op_num, elem in enumerate(consistent_output_sequences):
      detok_strings = em_util.ids_to_strings(elem["input_ids"], tokenizer)

      if num_input_sequences == 1:
        final_output.append("%d\t%d\t%s" %
                            (op_num, elem["ip_num"], detok_strings[0]))
      elif num_input_sequences == 2:
        final_output.append(
            "%d\t%d\t%s\t%s" %
            (op_num, elem["ip_num"], detok_strings[0], detok_strings[1]))

    if num_input_sequences == 1:
      header = "index\toriginal_index\tsentence"
    elif num_input_sequences == 2:
      header = "index\toriginal_index\tsentence1\tsentence2"

    final_output = [header] + final_output

    with tf.gfile.Open(FLAGS.output_file, "w") as f:
      f.write("\n".join(final_output) + "\n")

  return
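
For reference, the flip-selection rule that grad_difference drives above (Eq10 of the HotFlip paper) can be sketched in plain NumPy; the function name and arguments below are illustrative and not part of the original script.

import numpy as np

def best_hotflip(grad_one_hot, input_ids):
    """Pick the single token flip with the largest first-order estimated gain.

    grad_one_hot: [seq_length, vocab_size] gradient of the objective w.r.t.
        the one-hot input of a single example.
    input_ids: [seq_length] current token ids.
    Returns (position, new_token_id, estimated_gain).
    """
    positions = np.arange(len(input_ids))
    current = grad_one_hot[positions, input_ids]   # gradient at the current tokens
    gain = grad_one_hot - current[:, None]         # [seq_length, vocab_size]
    pos, new_id = np.unravel_index(np.argmax(gain), gain.shape)
    return pos, new_id, gain[pos, new_id]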
Code Example #37
0
    def get_train_valid_test(self):

        split_mask = self.split_mask
        batch_size = self.batch_size
        pca_components = self.pca_components
        img_size = self.image_size

        ##-------------
        print('train data shape before pca:[%d,%d,%d]' %
              (self.hsi_img.shape[0], self.hsi_img.shape[1],
               self.hsi_img.shape[2]))
        ##-------------

        # PCA the data
        pca = PCA(n_components=pca_components)
        pca_hsi_img = pca.fit_transform(
            np.reshape(self.hsi_img, [-1, self.hsi_img.shape[2]]))
        pca_hsi_img = np.reshape(
            pca_hsi_img, [self.hsi_img.shape[0], self.hsi_img.shape[1], -1])

        # construct the image patches

        r = self.image_size // 2
        print('half of the window:', r)

        new_pca_hsi_img = np.zeros([
            split_mask.shape[0], split_mask.shape[1],
            img_size * img_size * pca_components
        ])

        for i in range(split_mask.shape[0] - 2 * r):
            for j in range(split_mask.shape[1] - 2 * r):
                new_pca_hsi_img[i + r, j + r, :] = np.reshape(
                    pca_hsi_img[i:i + 2 * r + 1, j:j + 2 * r + 1], [
                        -1,
                    ])

        self.img_size_flat = new_pca_hsi_img.shape[2]
        # cut the edges
        split_mask = split_mask[2:split_mask.shape[0] - 2,
                                2:split_mask.shape[1] - 2]
        gnd_img = self.gnd_img[2:self.gnd_img.shape[0] - 2,
                               2:self.gnd_img.shape[1] - 2]
        new_pca_hsi_img = new_pca_hsi_img[2:new_pca_hsi_img.shape[0] - 2,
                                          2:new_pca_hsi_img.shape[1] - 2]

        print('pca hsi image shape:', pca_hsi_img.shape)
        print('new patches image:', new_pca_hsi_img.shape)
        print('new mask image:', split_mask.shape)
        print('new ground truth image:', gnd_img.shape)

        train_data_x = new_pca_hsi_img[split_mask == 'train']
        train_data_y = gnd_img[split_mask == 'train']

        ##-------------
        print('train data shape after pca:', train_data_x.shape)
        print('train data labels after pca:', train_data_y.shape)
        ##-------------

        valid_data_x = new_pca_hsi_img[split_mask == 'valid']
        valid_data_y = gnd_img[split_mask == 'valid']

        test_data_x = new_pca_hsi_img[split_mask == 'tests']
        test_data_y = gnd_img[split_mask == 'tests']

        print('original train pixels: %d, original valid pixels: %d, original test pixels: %d'\
              % (train_data_x.shape[0], valid_data_x.shape[0], test_data_x.shape[0]))

        # tackle the batch size mismatch problem
        mis_match = train_data_x.shape[0] % batch_size
        if mis_match != 0:
            mis_match = batch_size - mis_match

            train_data_x = np.vstack(
                (train_data_x, train_data_x[0:mis_match, :]))
            train_data_y = np.hstack((train_data_y, train_data_y[0:mis_match]))

        mis_match = valid_data_x.shape[0] % batch_size
        if mis_match != 0:
            mis_match = batch_size - mis_match
            valid_data_x = np.vstack(
                (valid_data_x, valid_data_x[0:mis_match, :]))
            valid_data_y = np.hstack((valid_data_y, valid_data_y[0:mis_match]))

        mis_match = test_data_x.shape[0] % batch_size
        if mis_match != 0:
            mis_match = batch_size - mis_match
            test_data_x = np.vstack((test_data_x, test_data_x[0:mis_match, :]))
            test_data_y = np.hstack((test_data_y, test_data_y[0:mis_match]))

        print('modified train pixels: %d, modified valid pixels: %d, modified test pixels: %d'\
              % (train_data_x.shape[0], valid_data_x.shape[0], test_data_x.shape[0]))

        # modify the data to 4D tensor, labels to one-hot labels
        print('original first train label: %d' % train_data_y[0])
        train_data_y = tf.one_hot(train_data_y,
                                  self.class_number,
                                  on_value=None,
                                  off_value=None,
                                  axis=-1,
                                  dtype=None,
                                  name=None)
        print('one hot first train label: ',
              tf.Session().run(train_data_y[0]))
        print('train_set labels shape:', train_data_y.shape)

        valid_data_y = tf.one_hot(valid_data_y,
                                  self.class_number,
                                  on_value=None,
                                  off_value=None,
                                  axis=-1,
                                  dtype=None,
                                  name=None)

        test_data_y = tf.one_hot(test_data_y,
                                 self.class_number,
                                 on_value=None,
                                 off_value=None,
                                 axis=-1,
                                 dtype=None,
                                 name=None)

        return [
            train_data_x,
            tf.Session().run(train_data_y), valid_data_x,
            tf.Session().run(valid_data_y), test_data_x,
            tf.Session().run(test_data_y)
        ]
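
The batch-size padding used three times above (repeating the first few samples so that each split divides evenly into batches) can be factored into a helper; a small sketch with illustrative names, not taken from the original class:

import numpy as np

def pad_to_batch(data_x, data_y, batch_size):
    """Repeat the first samples so that the sample count is a multiple of batch_size."""
    remainder = data_x.shape[0] % batch_size
    if remainder != 0:
        extra = batch_size - remainder
        data_x = np.vstack((data_x, data_x[:extra]))
        data_y = np.hstack((data_y, data_y[:extra]))
    return data_x, data_y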
Code Example #38
0
File: agent423.py Project: ShirMaimon/NetHack
    'move_S', 'move_SE', 'move_W', 'move_NW', 'rest', 'open_door', 'search',
    'kick'
]
#batchS = 4
n_input = (RADIUS * 2 + 1) * (RADIUS * 2 + 1)  # flattened observation window of (2*RADIUS+1) x (2*RADIUS+1) cells
n_action = len(actions)
n_item = 18
n_hidden_1 = n_input * 2  # 1st layer number of features
n_hidden_2 = n_input * 2  # 2nd layer number of features
n_rec = n_input

tf.reset_default_graph()

x = tf.placeholder(shape=[None, n_input], dtype=tf.int32)
x_one_hot = tf.one_hot(x, n_item)
rnn_input = tf.unstack(x_one_hot, axis=1)
Wr = tf.Variable(tf.random_normal([n_rec, n_hidden_1]))
# W1 = tf.Variable(tf.random_normal([n_rec,n_hidden_1]))
W2 = tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]))
W3 = tf.Variable(tf.random_normal([n_hidden_2, n_action]))
layer_rec = tf.contrib.rnn.BasicLSTMCell(n_rec)
rnn_output, final_state = tf.contrib.rnn.static_rnn(layer_rec,
                                                    rnn_input,
                                                    dtype=tf.float32)
layer_1 = tf.matmul(rnn_output[-1], Wr)
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.matmul(layer_1, W2)
layer_2 = tf.nn.relu(layer_2)
Qout = tf.matmul(layer_2, W3)
predictList = tf.nn.softmax(Qout)
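
The one_hot/unstack pattern above turns an integer observation vector into the per-timestep input list that static_rnn expects; a minimal shape check with toy dimensions (TF1 API, matching the snippet; the small sizes are illustrative only):

import numpy as np
import tensorflow as tf

x = tf.placeholder(shape=[None, 5], dtype=tf.int32)  # 5 positions
x_one_hot = tf.one_hot(x, 3)                          # [batch, 5, 3]
rnn_input = tf.unstack(x_one_hot, axis=1)             # list of 5 tensors, each [batch, 3]

with tf.Session() as sess:
    out = sess.run(rnn_input, {x: np.array([[0, 1, 2, 1, 0]])})
    print(len(out), out[0].shape)                      # 5 (1, 3)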
Code Example #39
0
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')


# Note: tf.name_scope is used for structuring the graph
# Note: the learning rate should be around 1e-4 ... 1e-6
# Note: the network is taken from https://www.tensorflow.org/get_started/mnist/pros
with tf.name_scope('Network'):

    with tf.name_scope('input'):
        x_image = tf.placeholder(tf.float32,
                                 [None, image_size, image_size, col_channels],
                                 name='Images_raw')
        y_raw = tf.placeholder(tf.int32, [None], name='Labels_raw')
        y_ = tf.one_hot(indices=y_raw, depth=43, name='Labels_oneHot')

    with tf.name_scope('learningrate'):
        learningrate = tf.placeholder(tf.float32)

    with tf.name_scope('Layer1'):
        W_conv1 = initVariable("W1", [5, 5, 3, 32])
        b_conv1 = initVariable("B1", [32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)  #resulting feature maps = 24x24 Pixel

    with tf.name_scope('Layer2'):
        W_conv2 = initVariable("W2", [5, 5, 32, 64])
        b_conv2 = initVariable("B2", [64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)  #resulting feature maps = 12x12 Pixel
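
The fragment above relies on conv2d, max_pool_2x2 and initVariable helpers whose definitions were cut off (the tail of the pooling call appears at the top of the fragment); a plausible reconstruction using standard TF1 idioms, assumed rather than verified against the original file:

def conv2d(x, W):
    # stride-1 convolution with SAME padding
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling, halving the spatial resolution
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def initVariable(name, shape):
    # truncated-normal initialisation (the original initializer may differ)
    return tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=0.1))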
Code Example #40
0
File: models.py Project: welbertrana/logistic_lda
def mlp(features, labels, mode, params):
    """
  Model function implementing a simple MLP which can be used for topic modeling.

  Args:
    features['embedding']: A tensor of shape [B, D]
    features['author_topic']: A tensor of shape [B] containing author labels as strings
    features['item_topic']: A tensor of shape [B] containing item labels (used in PREDICT only)
    labels: This will be ignored as labels are provided via `features`
    mode: Estimator's `ModeKeys`
    params['meta_info']['topics']: A list of strings of all possible topics
    params['hidden_units']: A list of integers describing the number of hidden units
    params['learning_rate']: Learning rate used with Adam
    params['decay_rate']: Exponential learning rate decay parameter
    params['decay_steps']: Exponential learning rate decay parameter
    params['embedding']: A function which preprocesses features

  Returns:
    A `tf.estimator.EstimatorSpec`
  """

    n_topics = len(params['meta_info']['topics'])

    # preprocess features (e.g., compute embeddings from words)
    features = params['embedding'](features)

    # convert string labels to integers
    topic_table = create_table(params['meta_info']['topics'])
    author_topics = topic_table.lookup(features['author_topic'])

    net = features['embedding']
    for units in params['hidden_units']:
        net = tf.layers.dense(
            net,
            units=units,
            activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(
                params['model_regularization']))

    logits = tf.layers.dense(
        net,
        n_topics,
        activation=None,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(
            params['model_regularization']))

    if mode == tf.estimator.ModeKeys.PREDICT:
        probs = tf.reduce_max(tf.nn.softmax(logits), 1)
        predictions = tf.argmax(logits, 1)
        predictions = {
            'item_id': features['item_id'],
            'item_prediction': predictions,
            'item_probability': probs,
            'item_topic': topic_table.lookup(features['item_topic']),
            'author_id': features['author_id'],
            'author_prediction': predictions,
            'author_probability': probs,
            'author_topic': author_topics,
        }

        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # model is trained to predict which topic an author belongs to
    loss = tf.reduce_mean(
        softmax_cross_entropy(targets=tf.one_hot(author_topics,
                                                 depth=n_topics),
                              logits=logits))

    tf.summary.scalar('loss', loss)

    if mode == tf.estimator.ModeKeys.EVAL:
        accuracy, acc_op = tf.metrics.accuracy(labels=author_topics,
                                               predictions=tf.argmax(
                                                   logits, 1),
                                               name='acc_op')
        metric_ops = {'accuracy': (accuracy, acc_op)}
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss,
                                          eval_metric_ops=metric_ops)

    optimizer = tf.train.AdamOptimizer(
        learning_rate=tf.train.exponential_decay(
            learning_rate=params['learning_rate'],
            decay_rate=params['decay_rate'],
            decay_steps=params['decay_steps'],
            global_step=tf.train.get_global_step()))
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())

    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
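
A minimal sketch of how a model function like mlp is wired into an Estimator, assuming the helpers it uses (create_table, softmax_cross_entropy) are importable; every parameter value below is a placeholder, not taken from the original project:

estimator = tf.estimator.Estimator(
    model_fn=mlp,
    params={
        'meta_info': {'topics': ['sports', 'politics', 'tech']},
        'hidden_units': [256, 128],
        'learning_rate': 1e-3,
        'decay_rate': 0.96,
        'decay_steps': 1000,
        'model_regularization': 1e-4,
        'embedding': lambda features: features,  # identity preprocessing
    })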
Code Example #41
0
File: models.py Project: welbertrana/logistic_lda
def logistic_lda(features, labels, mode, params):
    """
  An implementation of logistic LDA.

  Args:
    features['embedding']: A tensor of shape [B, D]
    features['author_topic']: A tensor of shape [B] containing author labels as strings
    features['author_id']: A tensor of shape [B] containing integer IDs
    features['item_topic']: A tensor of shape [B] containing item labels (use '' if unknown)
    features['item_id']: A tensor of shape [B] containing integer IDs
    labels: This will be ignored as labels are provided via `features`
    mode: Estimator's `ModeKeys`
    params['meta_info']['topics']: A list of strings of all possible topics
    params['meta_info']['author_ids']: A list of all possible author IDs (these IDs group items)
    params['hidden_units']: A list of integers describing the number of hidden units
    params['learning_rate']: Learning rate used with Adam
    params['decay_rate']: Exponential learning rate decay parameter
    params['decay_steps']: Exponential learning rate decay parameter
    params['author_topic_weight']: Controls how much author labels influence the model
    params['author_topic_iterations']: Number of iterations to infer missing author labels
    params['model_regularization']: Regularize model to make use of as many topics as possible
    params['items_per_author']: For simplicity, model assumes this many items per author
    params['alpha']: Smoothes topic distributions of authors
    params['embedding']: A function which preprocesses features
  """

    if params['author_topic_iterations'] < 1:
        raise ValueError('`author_topic_iterations` should be larger than 0.')

    n_authors = len(params['meta_info']['author_ids'])
    n_topics = len(params['meta_info']['topics'])

    with tf.name_scope('preprocessing'):
        # lookup table which maps topics to indices and missing topics to -1
        topic_table = create_table(keys=params['meta_info']['topics'] + [''],
                                   values=list(range(n_topics)) + [-1],
                                   name='topic_table')

        # convert string labels to integers
        author_topics = topic_table.lookup(features['author_topic'])
        item_topics = topic_table.lookup(features['item_topic'])

        # convert author IDs to low integers
        author_table = create_table(keys=np.asarray(
            params['meta_info']['author_ids'], dtype=np.int64),
                                    name='author_table')
        author_ids = tf.squeeze(author_table.lookup(features['author_id']))

    # preprocess features (e.g., compute embeddings from words)
    with tf.name_scope('embedding'):
        features = params['embedding'](features)

    # predict topics from items
    net = features['embedding']
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)

    with tf.name_scope('variational_inference'):
        # keeps track of topic counts per user
        topic_counts_var = tf.get_variable('topic_counts',
                                           shape=[n_authors, n_topics],
                                           dtype=tf.float32,
                                           initializer=tf.ones_initializer,
                                           trainable=False,
                                           use_resource=True)

        # keeps track of predicted topic distributions across all items
        topic_dist_total_var = tf.get_variable(
            'topic_dist_total',
            shape=[1, n_topics],
            initializer=tf.constant_initializer(1.0 / n_topics,
                                                dtype=tf.float32),
            trainable=False,
            use_resource=True)

        # expected topic counts for each author
        topic_counts = tf.gather(topic_counts_var, author_ids)

        author_topics_onehot = tf.one_hot(tf.squeeze(author_topics), n_topics)
        author_topics_prediction = tf.ones_like(
            author_topics_onehot) / n_topics

        # infer missing author topics
        for _ in range(params['author_topic_iterations']):
            if params['use_author_topics']:
                # where available, use ground truth instead of predictions
                author_topics_prediction = tf.where(author_topics < 0,
                                                    author_topics_prediction,
                                                    author_topics_onehot)

            # update beliefs over author's topic distribution
            author_alpha = params['alpha'] + topic_counts + params[
                'author_topic_weight'] * author_topics_prediction
            topic_biases = tf.digamma(author_alpha)

            # update predictions of author topics
            author_topics_prediction = tf.nn.softmax(
                params['author_topic_weight'] * topic_biases)

        logits = tf.layers.dense(net, n_topics, activation=None)  # BxK
        logits_biased = logits + topic_biases

        # probability of each topic
        probs = tf.nn.softmax(logits)
        probs_biased = tf.nn.softmax(logits_biased)

        if mode == tf.estimator.ModeKeys.PREDICT:
            if params['author_topic_weight'] < 1e-8:
                author_topics_prediction = tf.nn.softmax(1e-8 * topic_biases)

            predictions = {
                'item_id': features.get('item_id',
                                        tf.zeros_like(author_ids) - 1),
                'item_prediction': tf.argmax(logits_biased, 1),
                'item_probability': tf.reduce_max(probs_biased, 1),
                'item_topic': item_topics,
                'author_id': author_ids,
                'author_prediction': tf.argmax(author_topics_prediction, 1),
                'author_probability': tf.reduce_max(author_topics_prediction,
                                                    1),
                'author_topic': author_topics,
            }
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        # model is regularized to predict these topics
        expected_topics = (probs + 1e-6) / (topic_dist_total_var +
                                            1e-6) / n_topics

        # the unbiased model tries to predict the biased topics
        loss = tf.reduce_mean(
            softmax_cross_entropy(
                targets=tf.stop_gradient(probs_biased +
                                         params['model_regularization'] *
                                         expected_topics),
                logits=logits))

        tf.summary.scalar('cross_entropy', loss)

        # compute upper bound on the KL divergence (up to a constant)
        with tf.name_scope('upper_bound'):
            dirichlet_entropy = tf.distributions.Dirichlet(
                author_alpha).entropy()
            dirichlet_entropy = tf.reduce_mean(
                dirichlet_entropy) / params['items_per_author']

            dirichlet_regularizer = (params['alpha'] - 1.0) * tf.reduce_sum(
                topic_biases, axis=1)
            dirichlet_regularizer = tf.reduce_mean(
                dirichlet_regularizer) / params['items_per_author']

            regularizer_entropy = tf.reduce_sum(expected_topics *
                                                tf.log(expected_topics),
                                                axis=1)
            regularizer_entropy = -tf.reduce_mean(
                regularizer_entropy) * params['model_regularization']

            logprobs_biased = logits_biased - tf.reduce_logsumexp(
                logits_biased, axis=1, keepdims=True)
            topic_entropy_plus = tf.reduce_sum(
                probs_biased * (logprobs_biased - topic_biases), axis=1)
            topic_entropy_plus = -tf.reduce_mean(topic_entropy_plus)

            loss = loss - tf.stop_gradient(dirichlet_regularizer +
                                           dirichlet_entropy +
                                           topic_entropy_plus +
                                           regularizer_entropy)

            tf.summary.scalar('upper_bound', loss)

        if mode == tf.estimator.ModeKeys.EVAL:
            # this assumes that all authors/items are labeled
            accuracy_author = tf.metrics.accuracy(labels=author_topics,
                                                  predictions=tf.argmax(
                                                      topic_counts, 1),
                                                  name='acc_op')
            accuracy_item = tf.metrics.accuracy(labels=item_topics,
                                                predictions=tf.argmax(
                                                    logits_biased, 1),
                                                name='acc_op')
            metric_ops = {
                'accuracy_author': accuracy_author,
                'accuracy_item': accuracy_item
            }

            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              eval_metric_ops=metric_ops)

        # update topic counters
        topic_counts_diff = probs_biased - topic_counts / params[
            'items_per_author']
        topic_counts_update = tf.scatter_add(topic_counts_var, author_ids,
                                             topic_counts_diff)

        # update distribution of predicted topics
        topic_dist_diff = (probs - topic_dist_total_var) / (
            params['items_per_author'] * n_authors)
        topic_dist_total_update = tf.assign_add(
            topic_dist_total_var,
            tf.reduce_sum(topic_dist_diff, axis=0, keepdims=True))

        optimizer = tf.train.AdamOptimizer(
            learning_rate=tf.train.exponential_decay(
                learning_rate=params['learning_rate'],
                decay_rate=params['decay_rate'],
                decay_steps=params['decay_steps'],
                global_step=tf.train.get_global_step()))
        train_op = optimizer.minimize(loss,
                                      global_step=tf.train.get_global_step())

        # update model parameters, topic counts, and topic distribution estimate
        train_op = tf.group(train_op, topic_counts_update,
                            topic_dist_total_update)

    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
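
A NumPy/SciPy sketch of the per-author belief update implemented in the variational_inference scope above; scipy is pulled in here only for digamma and softmax, and all names are illustrative:

import numpy as np
from scipy.special import digamma, softmax

def author_topic_update(alpha, topic_counts, author_topic_weight, q_author):
    """Mirror of the author-topic update: alpha smoothing + counts + label evidence."""
    # posterior Dirichlet parameters for the author's topic distribution
    author_alpha = alpha + topic_counts + author_topic_weight * q_author
    # expected log topic probabilities (up to a constant); added to the item logits as a bias
    topic_biases = digamma(author_alpha)
    # refreshed prediction of the author's topic
    q_author = softmax(author_topic_weight * topic_biases)
    return topic_biases, q_author

# example: 4 topics, one author, uniform initial beliefs
biases, q = author_topic_update(0.1, np.ones(4), 2.0, np.ones(4) / 4)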
Code Example #42
0
def build_train(make_obs_ph,
                q_func,
                num_actions,
                optimizer,
                num_heads=10,
                grad_norm_clipping=None,
                gamma=1.0,
                double_q=True,
                scope="deepq",
                reuse=None,
                param_noise=False,
                param_noise_filter_func=None):
    """Creates the train function:

    Parameters
    ----------
    make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions
    reuse: bool
        whether or not to reuse the graph variables
    optimizer: tf.train.Optimizer
        optimizer to use for the Q-learning objective.
    grad_norm_clipping: float or None
        clip gradient norms to this value. If None no clipping is performed.
    gamma: float
        discount rate.
    double_q: bool
        if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
        In general it is a good idea to keep it enabled.
    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the scope must be given.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    param_noise_filter_func: tf.Variable -> bool
        function that decides whether or not a variable should be perturbed. Only applicable
        if param_noise is True. If set to None, default_param_noise_filter is used by default.

    Returns
    -------
    act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    update_target: () -> ()
        copy the parameters from the optimized Q function to the target Q function.
        See the top of the file for details.
    debug: {str: function}
        a bunch of functions to print debug data like q_values.
    """
    act_f = build_act(make_obs_ph,
                      q_func,
                      num_actions,
                      scope=scope,
                      reuse=reuse)

    with tf.variable_scope(scope, reuse=reuse):
        # set up placeholders
        obs_t_input = make_obs_ph("obs_t")
        act_t_ph = tf.placeholder(tf.int32, [None], name="action")
        rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
        head = tf.placeholder(tf.int32, [None], name="head_t")

        obs_tp1_input = make_obs_ph("obs_tp1")
        head_tp1 = tf.placeholder(tf.int32, [None], name="head_tp1")
        done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
        importance_weights_ph = tf.placeholder(tf.float32, [None],
                                               name="weight")

        # q network evaluation
        q_t = q_func(obs_t_input.get(),
                     num_actions,
                     scope="q_func",
                     reuse=True)  # reuse parameters from act
        q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        scope=tf.get_variable_scope().name +
                                        "/q_func")

        # target q network evaluation
        q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
        target_q_func_vars = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES,
            scope=tf.get_variable_scope().name + "/target_q_func")

        # q scores for actions which we know were selected in the given state.
        one_hot_action = tf.one_hot(act_t_ph, num_actions)
        q_t_selected = tf.reduce_sum(q_t * one_hot_action, axis=2)

        # compute estimate of best possible value starting from state at t + 1
        if double_q:
            q_tp1_using_online_net = q_func(obs_tp1_input.get(),
                                            num_actions,
                                            scope="q_func",
                                            reuse=True)
            q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 2)
            q_tp1_best = tf.reduce_sum(
                q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions),
                axis=2)
        else:
            q_tp1_best = tf.reduce_max(q_tp1, 2)
        q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best

        # compute RHS of bellman equation
        q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
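        # In equation form, this is the (double) Q-learning target:
        #   y = r + gamma * (1 - done) * Q_target(s', argmax_a Q_online(s', a))
        # (the non-double variant uses max_a Q_target(s', a) instead)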

        # compute the error (potentially clipped)
        q_t_selected = tf.gather(q_t_selected, head)
        q_t_selected_target = tf.gather(q_t_selected_target, head)
        td_error = tf.reduce_mean(q_t_selected -
                                  tf.stop_gradient(q_t_selected_target),
                                  axis=0)
        errors = U.huber_loss(td_error)
        weighted_error = tf.reduce_mean(importance_weights_ph * errors)

        # compute optimization op (potentially with gradient clipping)
        if grad_norm_clipping is not None:
            gradients = optimizer.compute_gradients(weighted_error,
                                                    var_list=q_func_vars)
            for i, (grad, var) in enumerate(gradients):
                if grad is not None:
                    gradients[i] = (tf.clip_by_norm(grad,
                                                    grad_norm_clipping), var)
            optimize_expr = optimizer.apply_gradients(gradients)
        else:
            optimize_expr = optimizer.minimize(weighted_error,
                                               var_list=q_func_vars)

        # update_target_fn will be called periodically to copy Q network to target Q network
        update_target_expr = []
        for var, var_target in zip(
                sorted(q_func_vars, key=lambda v: v.name),
                sorted(target_q_func_vars, key=lambda v: v.name)):
            update_target_expr.append(var_target.assign(var))
        update_target_expr = tf.group(*update_target_expr)

        # Create callable functions
        train = U.function(inputs=[
            obs_t_input, act_t_ph, rew_t_ph, head, head_tp1, obs_tp1_input,
            done_mask_ph, importance_weights_ph
        ],
                           outputs=td_error,
                           updates=[optimize_expr])

        update_target = U.function([], [], updates=[update_target_expr])

        q_values = U.function([obs_t_input], q_t)

        return act_f, train, update_target, {'q_values': q_values}
        def __graph__():
            """Build the inference graph"""
            with tf.name_scope('input'):
                # [BATCH_SIZE, SEQUENCE_LENGTH]
                x_input = tf.placeholder(dtype=tf.uint8, shape=[None, self.sequence_length], name='x_input')

                # [BATCH_SIZE, SEQUENCE_LENGTH, 10]
                x_onehot = tf.one_hot(indices=x_input, depth=10, on_value=1.0, off_value=0.0, name='x_onehot')

                # [BATCH_SIZE]
                y_input = tf.placeholder(dtype=tf.uint8, shape=[None], name='y_input')

                # [BATCH_SIZE, N_CLASSES]
                y_onehot = tf.one_hot(indices=y_input, depth=self.num_classes, on_value=1.0, off_value=-1.0,
                                      name='y_onehot')

            state = tf.placeholder(dtype=tf.float32, shape=[None, self.cell_size], name='initial_state')

            p_keep = tf.placeholder(dtype=tf.float32, name='p_keep')
            learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')

            cell = tf.contrib.rnn.GRUCell(self.cell_size)
            drop_cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=p_keep)

            # outputs: [BATCH_SIZE, SEQUENCE_LENGTH, CELL_SIZE]
            # states: [BATCH_SIZE, CELL_SIZE]
            outputs, states = tf.nn.dynamic_rnn(drop_cell, x_onehot, initial_state=state, dtype=tf.float32)

            states = tf.identity(states, name='H')

            with tf.name_scope('final_training_ops'):
                with tf.name_scope('weights'):
                    weight = tf.get_variable('weights',
                                             initializer=tf.random_normal([self.cell_size, self.num_classes],
                                                                          stddev=0.01))
                    self.variable_summaries(weight)
                with tf.name_scope('biases'):
                    bias = tf.get_variable('biases', initializer=tf.constant(0.1, shape=[self.num_classes]))
                    self.variable_summaries(bias)
                hf = tf.transpose(outputs, [1, 0, 2])
                last = tf.gather(hf, int(hf.get_shape()[0]) - 1)
                with tf.name_scope('Wx_plus_b'):
                    output = tf.matmul(last, weight) + bias
                    tf.summary.histogram('pre-activations', output)

            # L2-SVM
            with tf.name_scope('svm'):
                regularization_loss = tf.reduce_mean(tf.square(weight))
                hinge_loss = tf.reduce_mean(
                    tf.square(tf.maximum(tf.zeros([self.batch_size, self.num_classes]), 1 - y_onehot * output)))
                with tf.name_scope('loss'):
                    loss = regularization_loss + self.svm_c * hinge_loss
            tf.summary.scalar('loss', loss)

            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

            with tf.name_scope('accuracy'):
                predicted_class = tf.sign(output)
                predicted_class = tf.identity(predicted_class, name='prediction')
                with tf.name_scope('correct_prediction'):
                    correct = tf.equal(tf.argmax(predicted_class, 1), tf.argmax(y_onehot, 1))
                with tf.name_scope('accuracy'):
                    accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            tf.summary.scalar('accuracy', accuracy)

            # merge all the summaries collected from the TF graph
            merged = tf.summary.merge_all()

            # set class properties
            self.x_input = x_input
            self.y_input = y_input
            self.y_onehot = y_onehot
            self.p_keep = p_keep
            self.loss = loss
            self.optimizer = optimizer
            self.state = state
            self.states = states
            self.learning_rate = learning_rate
            self.predicted_class = predicted_class
            self.accuracy = accuracy
            self.merged = merged
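
The svm scope above implements a squared-hinge (L2-SVM) output layer: with labels encoded as +1/-1 (hence off_value=-1.0 in y_onehot), the minimized loss is, in LaTeX notation,

L = \mathrm{mean}(W^2) + C \cdot \mathrm{mean}\big(\max(0,\; 1 - y \odot f(x))^2\big)

where C is svm_c, and predictions are read off as the sign of the output.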
Code Example #44
0
File: tf_ganist.py Project: mahyarkoy/ganist
	def build_graph(self):
		with tf.name_scope('ganist'):
			### define placeholders for image and label inputs **g_num** **mt**
			self.im_input = tf.placeholder(tf_dtype, [None]+self.data_dim, name='im_input')
			#self.z_input = tf.placeholder(tf_dtype, [None, self.z_dim], name='z_input')
			#self.z_input = tf.placeholder(tf_dtype, [None, 1, 1, 1], name='z_input')
			self.z_input = tf.placeholder(tf.int32, [None], name='z_input')
			self.zi_input = tf.placeholder(tf_dtype, [None, self.z_dim], name='zi_input')
			self.e_input = tf.placeholder(tf_dtype, [None, 1, 1, 1], name='e_input')
			self.train_phase = tf.placeholder(tf.bool, name='phase')

			### build generator **mt**
			self.g_layer = self.build_gen(self.z_input, self.zi_input, self.g_act, self.train_phase)
			#self.g_layer = self.build_gen_mt(self.im_input, self.z_input, self.g_act, self.train_phase)

			### build discriminator
			self.r_logits, self.r_hidden = self.build_dis(self.im_input, self.d_act, self.train_phase)
			self.g_logits, self.g_hidden = self.build_dis(self.g_layer, self.d_act, self.train_phase, reuse=True)
			self.r_en_logits = self.build_encoder(self.r_hidden, self.d_act, self.train_phase)
			self.g_en_logits = self.build_encoder(self.g_hidden, self.d_act, self.train_phase, reuse=True)

			### real gen manifold interpolation
			rg_layer = (1.0 - self.e_input) * self.g_layer + self.e_input * self.im_input
			self.rg_logits, _ = self.build_dis(rg_layer, self.d_act, self.train_phase, reuse=True)

			### build d losses
			if self.d_loss_type == 'log':
				self.d_r_loss = tf.nn.sigmoid_cross_entropy_with_logits(
						logits=self.r_logits, labels=tf.ones_like(self.r_logits, tf_dtype))
				self.d_g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
						logits=self.g_logits, labels=tf.zeros_like(self.g_logits, tf_dtype))
				self.d_rg_loss = tf.nn.sigmoid_cross_entropy_with_logits(
						logits=self.rg_logits, labels=tf.ones_like(self.rg_logits, tf_dtype))
			elif self.d_loss_type == 'was':
				self.d_r_loss = -self.r_logits 
				self.d_g_loss = self.g_logits
				self.d_rg_loss = -self.rg_logits
			else:
				raise ValueError('>>> d_loss_type: %s is not defined!' % self.d_loss_type)

			### gradient penalty
			### NaN free norm gradient
			rg_grad = tf.gradients(self.rg_logits, rg_layer)
			rg_grad_flat = tf.reshape(rg_grad, [-1, np.prod(self.data_dim)])
			rg_grad_ok = tf.reduce_sum(tf.square(rg_grad_flat), axis=1) > 1.
			rg_grad_safe = tf.where(rg_grad_ok, rg_grad_flat, tf.ones_like(rg_grad_flat))
			#rg_grad_abs = tf.where(rg_grad_flat >= 0., rg_grad_flat, -rg_grad_flat)
			rg_grad_abs =  0. * rg_grad_flat
			rg_grad_norm = tf.where(rg_grad_ok, 
				tf.norm(rg_grad_safe, axis=1), tf.reduce_sum(rg_grad_abs, axis=1))
			gp_loss = tf.square(rg_grad_norm - 1.0)
			### for logging
			self.rg_grad_norm_output = tf.norm(rg_grad_flat, axis=1)
			
			### d loss combination **g_num**
			self.d_loss_mean = tf.reduce_mean(self.d_r_loss + self.d_g_loss)
			self.d_loss_total = self.d_loss_mean + self.gp_loss_weight * tf.reduce_mean(gp_loss)

			### build g loss
			if self.g_loss_type == 'log':
				self.g_loss = -tf.nn.sigmoid_cross_entropy_with_logits(
					logits=self.g_logits, labels=tf.zeros_like(self.g_logits, tf_dtype))
			elif self.g_loss_type == 'mod':
				self.g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
					logits=self.g_logits, labels=tf.ones_like(self.g_logits, tf_dtype))
			elif self.g_loss_type == 'was':
				self.g_loss = -self.g_logits
			else:
				raise ValueError('>>> g_loss_type: %s is not defined!' % self.g_loss_type)

			self.g_loss_mean = tf.reduce_mean(self.g_loss, axis=None)
			self.g_grad_norm = tf.norm(tf.reshape(
				tf.gradients(self.g_loss, self.g_layer), [-1, np.prod(self.data_dim)]), axis=1)

			### mean matching
			mm_loss = tf.reduce_mean(tf.square(tf.reduce_mean(self.g_layer, axis=0) - tf.reduce_mean(self.im_input, axis=0)), axis=None)

			### reconstruction penalty
			rec_penalty = tf.reduce_mean(tf.minimum(tf.log(tf.reduce_sum(
				tf.square(self.g_layer - self.im_input), axis=[1, 2, 3])+1e-6), 6.)) \
				+ tf.reduce_mean(tf.minimum(tf.log(tf.reduce_sum(
				tf.square(self.g_layer - tf.reverse(self.im_input, axis=[0])), axis=[1, 2, 3])+1e-6), 6.))

			### generated encoder loss: lower bound on mutual_info(z_input, generator id) **g_num**
			self.g_en_loss = tf.nn.softmax_cross_entropy_with_logits(
				labels=tf.one_hot(tf.reshape(self.z_input, [-1]), self.g_num, dtype=tf_dtype), 
				logits=self.g_en_logits)

			### real encoder entropy: entropy of g_id given real image, marginal entropy of g_id **g_num**
			self.r_en_h = -tf.reduce_mean(tf.reduce_sum(tf.nn.softmax(self.r_en_logits) * tf.nn.log_softmax(self.r_en_logits), axis=1))
			r_en_marg_pr = tf.reduce_mean(tf.nn.softmax(self.r_en_logits), axis=0)
			self.r_en_marg_hlb = -tf.reduce_sum(r_en_marg_pr * tf.log(r_en_marg_pr + 1e-8))
			print('r_en_logits shape: ', self.r_en_logits.shape)

			### discounter
			self.rl_counter = tf.get_variable('rl_counter', dtype=tf_dtype,
				initializer=1.0)

			### g loss combination **g_num**
			#self.g_loss_mean += self.mm_loss_weight * mm_loss - self.rec_penalty_weight * rec_penalty
			self.g_loss_total = self.g_loss_mean + self.en_loss_weight * tf.reduce_mean(self.g_en_loss)

			### e loss combination
			self.en_loss_total = tf.reduce_mean(self.g_en_loss) + \
				0. * self.r_en_h + 0.* -self.r_en_marg_hlb

			### collect params
			self.g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "g_net")
			self.d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "d_net")
			self.e_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "e_net")

			### compute stat of weights
			self.nan_vars = 0.
			self.inf_vars = 0.
			self.zero_vars = 0.
			self.big_vars = 0.
			self.count_vars = 0
			for v in self.g_vars + self.d_vars:
				self.nan_vars += tf.reduce_sum(tf.cast(tf.is_nan(v), tf_dtype))
				self.inf_vars += tf.reduce_sum(tf.cast(tf.is_inf(v), tf_dtype))
				self.zero_vars += tf.reduce_sum(tf.cast(tf.square(v) < 1e-6, tf_dtype))
				self.big_vars += tf.reduce_sum(tf.cast(tf.square(v) > 1., tf_dtype))
				self.count_vars += tf.reduce_prod(v.get_shape())
			self.count_vars = tf.cast(self.count_vars, tf_dtype)
			#self.nan_vars /= self.count_vars 
			#self.inf_vars /= self.count_vars
			self.zero_vars /= self.count_vars
			self.big_vars /= self.count_vars

			self.g_vars_count = 0
			self.d_vars_count = 0
			self.e_vars_count = 0
			for v in self.g_vars:
				self.g_vars_count += int(np.prod(v.get_shape()))
			for v in self.d_vars:
				self.d_vars_count += int(np.prod(v.get_shape()))
			for v in self.e_vars:
				self.e_vars_count += int(np.prod(v.get_shape()))

			### build optimizers
			update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
			print('>>> update_ops list: ', update_ops)
			with tf.control_dependencies(update_ops):
				self.g_opt = tf.train.AdamOptimizer(
					self.g_lr, beta1=self.g_beta1, beta2=self.g_beta2).minimize(
					self.g_loss_total, var_list=self.g_vars)
				self.d_opt = tf.train.AdamOptimizer(
					self.d_lr, beta1=self.d_beta1, beta2=self.d_beta2).minimize(
					self.d_loss_total, var_list=self.d_vars)
				self.e_opt = tf.train.AdamOptimizer(
					self.e_lr, beta1=self.e_beta1, beta2=self.e_beta2).minimize(
					self.en_loss_total, var_list=self.e_vars)

			### summaries **g_num**
			g_loss_sum = tf.summary.scalar("g_loss", self.g_loss_mean)
			d_loss_sum = tf.summary.scalar("d_loss", self.d_loss_mean)
			e_loss_sum = tf.summary.scalar("e_loss", self.en_loss_total)
			self.summary = tf.summary.merge([g_loss_sum, d_loss_sum, e_loss_sum])

			### Policy gradient updates **g_num**
			self.pg_var = tf.get_variable('pg_var', dtype=tf_dtype,
				initializer=self.g_rl_vals)
			self.pg_q = tf.get_variable('pg_q', dtype=tf_dtype,
				initializer=self.g_rl_vals)
			self.pg_base = tf.get_variable('pg_base', dtype=tf_dtype,
				initializer=0.0)
			self.pg_var_flat = self.pg_temp * tf.reshape(self.pg_var, [1, -1])
			
			### log p(x) for the selected policy at each batch location
			log_soft_policy = -tf.nn.softmax_cross_entropy_with_logits(
				labels=tf.one_hot(tf.reshape(self.z_input, [-1]), self.g_num, dtype=tf_dtype), 
				logits=tf.tile(self.pg_var_flat, tf.shape(tf.reshape(self.z_input, [-1, 1]))))
			
			self.gi_h = -tf.reduce_sum(tf.nn.softmax(self.pg_var) * tf.nn.log_softmax(self.pg_var))
			
			### policy gradient reward
			#pg_reward = tf.reshape(self.d_g_loss, [-1]) - 0.*self.en_loss_weight * tf.reshape(self.g_en_loss, [-1])
			pg_reward = tf.reduce_mean(self.r_en_logits, axis=0)
			
			### critic update (q values update)
			#pg_q_z = tf.gather(self.pg_q, tf.reshape(self.z_input, [-1]))
			#pg_q_opt = tf.scatter_update(self.pg_q, tf.reshape(self.z_input, [-1]), 
			#		self.pg_q_lr*pg_q_z + (1-self.pg_q_lr) * pg_reward)
			rl_counter_opt = tf.assign(self.rl_counter, self.rl_counter * 0.999)

			### r_en_logits as q values
			pg_q_opt = tf.assign(self.pg_q, (1-self.pg_q_lr)*self.pg_q + \
				self.pg_q_lr * pg_reward)

			### cross entropy E_x H(p(c|x)||q(c))
			with tf.control_dependencies([pg_q_opt, rl_counter_opt]):
				en_pr = tf.nn.softmax(self.r_en_logits)
				pg_loss_total = -tf.reduce_mean(en_pr * tf.nn.log_softmax(self.pg_var)) \
					- 1000. * self.rl_counter * self.gi_h	

			### actor update (p values update)
			#with tf.control_dependencies([pg_q_opt, rl_counter_opt]):
			#	pg_q_zu = tf.gather(self.pg_q, tf.reshape(self.z_input, [-1]))
			#	pg_loss_total = -tf.reduce_mean(log_soft_policy * pg_q_zu) + \
			#		1000. * self.rl_counter * -self.gi_h

			self.pg_opt = tf.train.AdamOptimizer(
					self.pg_lr, beta1=self.pg_beta1, beta2=self.pg_beta2).minimize(
					pg_loss_total, var_list=[self.pg_var])
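For orientation, the gi_h term above is the Shannon entropy of the softmax policy over pg_var, which appears to act as an entropy bonus in the actor loss. A minimal stand-alone sketch (TF 1.x style; the uniform logits are a hypothetical stand-in for pg_var):

import numpy as np
import tensorflow as tf

logits = tf.constant([1.0, 1.0, 1.0, 1.0])  # hypothetical stand-in for pg_var
entropy = -tf.reduce_sum(tf.nn.softmax(logits) * tf.nn.log_softmax(logits))

with tf.Session() as sess:
    # Uniform logits give the maximum entropy, log(4) ~= 1.386.
    print(sess.run(entropy), np.log(4.0))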
Code example #45
    def __init__(self, nb_actions, scope):
        with tf.variable_scope(scope):

            self.inputs = tf.placeholder(
                shape=[None, FLAGS.resized_height, FLAGS.resized_width, FLAGS.agent_history_length], dtype=tf.float32,
                name="Input")

            self.image_summaries = []
            self.image_summaries.append(
                tf.summary.image('input', tf.expand_dims(self.inputs[:, :, :, 0], 3), max_outputs=FLAGS.batch_size))

            out = self.inputs
            self.nb_actions = nb_actions
            with tf.variable_scope("convnet"):
                out = layers.conv2d(out, num_outputs=32, kernel_size=5, stride=2, activation_fn=tf.nn.relu,
                                      variables_collections=tf.get_collection("variables"),
                                      outputs_collections="activations")

                out = layers.conv2d(out, num_outputs=32, kernel_size=5, stride=2, activation_fn=tf.nn.relu,
                                      padding="VALID",
                                      variables_collections=tf.get_collection("variables"),
                                      outputs_collections="activations")
            conv_out = layers.flatten(out)

            with tf.variable_scope("action_value"):
                value_out = layers.fully_connected(conv_out, num_outputs=FLAGS.hidden_size,
                                                   activation_fn=None,
                                                   variables_collections=tf.get_collection("variables"),
                                                   outputs_collections="activations")
                if FLAGS.layer_norm:
                    value_out = layer_norm_fn(value_out, relu=True)
                else:
                    value_out = tf.nn.relu(value_out)
                self.action_values = value_out = layers.fully_connected(value_out, num_outputs=nb_actions,
                                                   activation_fn=None,
                                                   variables_collections=tf.get_collection("variables"),
                                                   outputs_collections="activations")

            if scope != 'target':
                self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
                self.actions_onehot = tf.one_hot(self.actions, nb_actions, dtype=tf.float32, name="actions_one_hot")
                self.target_q = tf.placeholder(shape=[None], dtype=tf.float32, name="target_Q")

                self.action_value = tf.reduce_sum(tf.multiply(self.action_values, self.actions_onehot),
                                                  reduction_indices=1, name="Q")
                # Loss functions
                td_error = self.action_value - self.target_q
                self.action_value_loss = tf.reduce_mean(huber_loss(td_error))
                #self.action_value_loss = l2_loss(td_error)
                if FLAGS.optimizer == "Adam": # to add more optimizers
                    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.lr)
                else: # default = Adam
                    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.lr)
                gradients, self.train_op = minimize_and_clip(optimizer, self.action_value_loss, tf.trainable_variables(), FLAGS.gradient_norm_clipping)
                # gradients, self.train_op = minimize(optimizer, self.action_value_loss, tf.trainable_variables())
                self.summaries = []
                # self.summaries.append(
                #     tf.contrib.layers.summarize_collection("variables"))  # tf.get_collection("variables")))
                # self.summaries.append(tf.contrib.layers.summarize_collection("activations",
                #                                                              summarizer=tf.contrib.layers.summarize_activation))

                for grad, weight in gradients:
                    if grad is not None:
                        self.summaries.append(tf.summary.histogram(weight.name + '_grad', grad))
                        self.summaries.append(tf.summary.histogram(weight.name, weight))

                self.merged_summary = tf.summary.merge(self.summaries)
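The action_value op above picks Q(s, a) for the action actually taken by masking the Q-vector with a one-hot of the action and summing over the action axis. A small self-contained sketch of that selection on toy values (TF 1.x), together with an equivalent gather_nd formulation:

import tensorflow as tf

q_values = tf.constant([[1., 2., 3.],
                        [4., 5., 6.]])   # toy Q-values: 2 states, 3 actions
actions = tf.constant([2, 0])            # toy actions taken in each state

# One-hot mask route, as in the snippet above.
masked = tf.reduce_sum(q_values * tf.one_hot(actions, 3), axis=1)
# Equivalent direct-indexing route.
gathered = tf.gather_nd(q_values, tf.stack([tf.range(2), actions], axis=1))

with tf.Session() as sess:
    print(sess.run([masked, gathered]))  # both print [3., 4.]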
Code example #46
    def _creat_model(self):
        self.y_ = tf.one_hot(indices=self.y, depth=self.target_vocab_size)

        with tf.variable_scope('generator'):
            self.g = self.generator()
        with tf.variable_scope('discriminator') as scope:
            self.D_real = self.discriminator(self.y_)
            scope.reuse_variables()
            self.D_fake = self.discriminator(self.g)

        self.G_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='generator')
        self.D_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='discriminator')

        self.preds = tf.to_int32(tf.argmax(self.g, axis=-1))
        self.istarget = tf.to_float(tf.not_equal(self.y, 0))
        self.acc = tf.reduce_sum(
            tf.to_float(tf.equal(self.preds, self.y)) *
            self.istarget) / (tf.reduce_sum(self.istarget))
        tf.summary.scalar('acc', self.acc)
        if self.is_training:
            # Loss
            self.y_smoothed = label_smoothing(
                tf.one_hot(self.y, depth=self.target_vocab_size))
            self.loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.g, labels=self.y_smoothed)
            self.content_loss = tf.reduce_sum(
                self.loss * self.istarget) / (tf.reduce_sum(self.istarget))
            tf.summary.scalar('mean_loss', self.content_loss)

            disc_loss = -tf.reduce_mean(self.D_real) + tf.reduce_mean(
                self.D_fake)
            gen_loss = -tf.reduce_mean(self.D_fake)

            alpha = tf.random_uniform(shape=[hp.batch_size, 1, 1],
                                      minval=0.,
                                      maxval=1.)

            differences = self.y_ - self.g
            interpolates = self.y_ + alpha * differences
            gradients = tf.gradients(self.discriminator(interpolates),
                                     [interpolates])[0]
            slopes = tf.sqrt(
                tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
            gradient_penalty = tf.reduce_mean((slopes - 1.)**2)

            self.global_step = tf.Variable(0, name='global_step')
            #====
            # tf.assign(ref, value, validate_shape=None, use_locking=None, name=None)
            # The function assigns `value` to `ref`. Note that `ref` must be a tensor created by
            # tf.Variable; passing ref=tf.constant() raises an error. Also, shape(value) must equal shape(ref).
            #=====
            self.gs_op = tf.assign(self.global_step,
                                   tf.add(self.global_step, 1))

            self.D_loss = self.SIGMA * (disc_loss +
                                        self.LAMBDA * gradient_penalty)
            self.G_loss = self.content_loss + self.SIGMA * gen_loss
            self.D_opt = tf.train.AdamOptimizer(
                learning_rate=hp.D_learning_rate, beta1=0.5,
                beta2=0.9).minimize(self.D_loss, var_list=self.D_params)
            self.G_opt = tf.train.AdamOptimizer(
                learning_rate=hp.G_learning_rate,
                beta1=0.8,
                beta2=0.98,
                epsilon=1e-8).minimize(self.G_loss, var_list=self.G_params)
            self.merged = tf.summary.merge_all()
Code example #47
 def _create_loss_op(self):
     one_hot_y = tf.one_hot(self.Y, 43)
     return tf.reduce_mean(
         tf.nn.softmax_cross_entropy_with_logits(logits=self.inference_op,
                                                 labels=one_hot_y))
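The loss op above materializes a one-hot label tensor and feeds it to the dense softmax cross-entropy. With integer class ids, the sparse variant computes the same quantity without building the one-hot tensor. A minimal comparison sketch (TF 1.x; the logits and labels are hypothetical):

import numpy as np
import tensorflow as tf

logits = tf.constant(np.random.randn(4, 43).astype(np.float32))  # hypothetical batch of logits
labels = tf.constant([0, 7, 42, 3], dtype=tf.int32)              # hypothetical integer labels

dense_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.one_hot(labels, 43), logits=logits))
sparse_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits))

with tf.Session() as sess:
    print(sess.run([dense_loss, sparse_loss]))  # the two values agree up to float error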
Code example #48
  def __call__(self, inputs, filter_scaling=1, strides=1,
    last_inputs=None, cell_num=-1):
    self._cell_num = cell_num
    self._filter_scaling = filter_scaling
    self._filter_size = int(self._filters * filter_scaling)
    num_nodes = self._num_nodes
    dag = self._dag
    data_format = self._data_format

    # node 1 and node 2 are last_inputs and inputs respectively
    # begin processing from node 3

    last_inputs, inputs = self._cell_base(last_inputs, inputs, is_training=True)
    layers = [last_inputs, inputs]
    used = []
    for i in xrange(num_nodes):
      prev_layers = tf.stack(layers, axis=0)
      with tf.variable_scope('cell_{}'.format(i+1)):
        with tf.variable_scope('x'):
          x_id = dag[4*i]
          x_op = dag[4*i+1]
          x = prev_layers[x_id, :, :, :, :]
          x = self._nas_cell(x, i, x_id, x_op, self._filter_size)
          x_used = tf.one_hot(x_id, depth=num_nodes+2, dtype=tf.int32)
        with tf.variable_scope('y'):
          y_id = dag[4*i+2]
          y_op = dag[4*i+3]
          y = prev_layers[y_id, :, :, :, :]
          y = self._nas_cell(y, i, y_id, y_op, self._filter_size)
          y_used = tf.one_hot(y_id, depth=num_nodes+2, dtype=tf.int32)
        
        output = x + y
        used.extend([x_used, y_used])
        layers.append(output)

    used = tf.add_n(used)
    indices = tf.where(tf.equal(used, 0))
    indices = tf.to_int32(indices)
    indices = tf.reshape(indices, [-1])
    num_outs = tf.size(indices)
    out = tf.stack(layers, axis=0)
    out = tf.gather(out, indices, axis=0)

    inp = prev_layers[0]
    if self._data_format == "channels_last":
      N = tf.shape(inp)[0]
      H = tf.shape(inp)[1]
      W = tf.shape(inp)[2]
      C = tf.shape(inp)[3]
      out = tf.transpose(out, [1, 2, 3, 0, 4])
      out = tf.reshape(out, [N, H, W, num_outs * self._filter_size])
    elif self._data_format == "channels_first":
      N = tf.shape(inp)[0]
      C = tf.shape(inp)[1]
      H = tf.shape(inp)[2]
      W = tf.shape(inp)[3]
      out = tf.transpose(out, [1, 0, 2, 3, 4])
      out = tf.reshape(out, [N, num_outs * self._filter_size, H, W])
    else:
      raise ValueError("Unknown data_format '{0}'".format(self._data_format))

    with tf.variable_scope("final_conv"):
      w = create_weight("w",
                        [self._num_nodes + 2, self._filter_size * self._filter_size],
                        initializer=_KERNEL_INITIALIZER)
      w = tf.gather(w, indices, axis=0)
      w = tf.reshape(w, [1, 1, num_outs * self._filter_size, self._filter_size])
      out = tf.nn.relu(out)
      out = tf.nn.conv2d(out, w, strides=[1, 1, 1, 1], padding="SAME",
                         data_format='NCHW' if self._data_format == 'channels_first' else 'NHWC')
      out = batch_normalization(out, is_training=True, data_format=self._data_format)

    out = tf.reshape(out, tf.shape(prev_layers[0]))

    return out
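The `used` bookkeeping above treats one-hot vectors as usage counters: each chosen input id contributes a one-hot row, tf.add_n sums them, and any slot whose count stays zero is a loose end that gets concatenated into the cell output. A toy sketch of that counting trick (hypothetical ids):

import tensorflow as tf

num_slots = 5
chosen = [0, 2, 2]   # hypothetical input ids picked by the DAG
used = tf.add_n([tf.one_hot(i, depth=num_slots, dtype=tf.int32) for i in chosen])
unused = tf.reshape(tf.where(tf.equal(used, 0)), [-1])

with tf.Session() as sess:
    print(sess.run(used))    # [1 0 2 0 0]
    print(sess.run(unused))  # [1 3 4]: slots never used as an input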
Code example #49
def estimator_model_fn(features, labels, mode, params):
    """The estimator function"""
    batch_size = tf.shape(features['source'])[0]

    if mode == tf.estimator.ModeKeys.PREDICT:
        # TODO: To be implemented
        return

    if FLAGS.step == 'source':
        input_layer_source = tf.feature_column.input_layer(
            {"source": features['source']}, params['feature_columns'])
        # CNNs need input data to be of shape [batch_size, width, height, channel]
        input_layer_source = tf.reshape(
            input_layer_source, [batch_size, 28, 28, params['channel_size']])

        out = lenet_encoder(input_layer_source, scope='source_encoder')
        with tf.variable_scope('classifier', reuse=tf.AUTO_REUSE):
            logits = tf.layers.dense(out, 10)

        # Gotta change labels to one-hot
        class_labels = tf.one_hot(labels['labels'], 10)

        # Compute loss
        class_loss = tf.losses.softmax_cross_entropy(class_labels,
                                                     logits=logits)

        # Get predicted classes
        predicted_classes_source = tf.argmax(logits,
                                             axis=1,
                                             output_type=tf.int32)

        # Evaluate if in EVAL
        if mode == tf.estimator.ModeKeys.EVAL:
            source_class_acc = tf.metrics.accuracy(
                labels=labels['labels'],
                predictions=predicted_classes_source,
                name='source_class_acc_op')
            metrics = {'source_class_acc': source_class_acc}
            return tf.estimator.EstimatorSpec(mode,
                                              loss=class_loss,
                                              eval_metric_ops=metrics)

        # Calculate a non streaming (per batch) accuracy
        source_class_acc = utilities.non_streaming_accuracy(
            predicted_classes_source, tf.cast(labels['labels'], tf.int32))

        # Initialize learning rate
        # tf.summary.scalar('class_loss', class_loss)
        # tf.summary.scalar('source_class_acc', source_class_acc)
        tf.identity(class_loss, 'loss')
        tf.identity(source_class_acc, 'source_class_acc')
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(
                class_loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode,
                                          loss=class_loss,
                                          train_op=train_op)
    if FLAGS.step == 'target':
        # Now train adversarial
        input_layer_source = tf.feature_column.input_layer(
            {"source": features['source']}, params['feature_columns'][0])
        input_layer_target = tf.feature_column.input_layer(
            {"target": features['target']}, params['feature_columns'][1])
        # CNNs need input data to be of shape [batch_size, width, height, channel]
        input_layer_source = tf.reshape(
            input_layer_source, [batch_size, 28, 28, params['channel_size']])
        input_layer_target = tf.reshape(
            input_layer_target, [batch_size, 32, 32, params['channel_size']])
        # Resize SVHN source to 28 x 28
        input_layer_target = tf.image.resize_images(input_layer_target,
                                                    [28, 28])
        # Get source and target encoded vectors.
        out_source = lenet_encoder(input_layer_source,
                                   scope='source_encoder',
                                   trainable=False)
        out_target = lenet_encoder(input_layer_target,
                                   scope='target_encoder',
                                   trainable=True)
        out_target_ = lenet_encoder(input_layer_target,
                                    scope='source_encoder',
                                    trainable=False)
        # Classify target for non-streaming accuracy
        with tf.variable_scope('classifier', reuse=tf.AUTO_REUSE):
            class_logits_target = tf.layers.dense(out_target,
                                                  10,
                                                  trainable=False)
        with tf.variable_scope('classifier', reuse=tf.AUTO_REUSE):
            class_logits_source = tf.layers.dense(out_source,
                                                  10,
                                                  trainable=False)
        with tf.variable_scope('classifier', reuse=tf.AUTO_REUSE):
            class_logits_target_ = tf.layers.dense(out_target_,
                                                   10,
                                                   trainable=False)
        # Initialize source encoder with pretrained weights
        tf.train.init_from_checkpoint(
            tf.train.latest_checkpoint('./model_m2mm/source_model/'), {
                'source_encoder/': 'source_encoder/',
                'classifier/': 'classifier/'
            })
        if tf.train.latest_checkpoint(
                './model_m2mm/adversarial_model') is None:
            tf.train.init_from_checkpoint(
                tf.train.latest_checkpoint('./model_m2mm/source_model/'),
                {'source_encoder/': 'target_encoder/'})
        # Create non-streaming (per batch) accuracy
        pred_classes_target = tf.argmax(class_logits_target,
                                        axis=1,
                                        output_type=tf.int32)
        pred_classes_source = tf.argmax(class_logits_source,
                                        axis=1,
                                        output_type=tf.int32)
        pred_classes_target_ = tf.argmax(class_logits_target_,
                                         axis=1,
                                         output_type=tf.int32)
        # Create discriminator labels
        source_adv_label = tf.ones([tf.shape(out_source)[0]], tf.int32)
        target_adv_label = tf.zeros([tf.shape(out_target)[0]], tf.int32)
        # Send encoded vectors through discriminator
        disc_logits_source = discriminator(out_source)
        disc_logits_target = discriminator(out_target)
        # disc_logits_source = tf.Print(disc_logits_source, [tf.argmax(disc_logits_source, axis=1)], 'Source discriminator: ')
        # Calculate losses
        # The generator uses inverted labels as explained in TODO: LINK!!
        loss_gen = tf.losses.sparse_softmax_cross_entropy(
            logits=disc_logits_target, labels=(1 - target_adv_label))
        loss_adv = tf.losses.sparse_softmax_cross_entropy(logits=disc_logits_source,
                                                          labels=source_adv_label) + \
                   tf.losses.sparse_softmax_cross_entropy(logits=disc_logits_target,
                                                          labels=target_adv_label)
        tf.summary.scalar("generator_loss", loss_gen)
        tf.summary.scalar("discriminator_loss", loss_adv)
        tf.identity(loss_gen, 'loss_gen')
        tf.identity(loss_adv, 'loss_adv')
        # Evaluate if in EVAL
        if mode == tf.estimator.ModeKeys.EVAL:
            target_class_acc_ = tf.metrics.accuracy(
                labels=labels['label_t'],
                predictions=pred_classes_target_,
                name='target_class_encoder_acc')
            source_class_acc = tf.metrics.accuracy(
                labels=labels['label_s'],
                predictions=pred_classes_source,
                name='source_class_acc_op')
            target_class_acc = tf.metrics.accuracy(
                labels=labels['label_t'],
                predictions=pred_classes_target,
                name='target_class_acc_op')
            metrics = {
                'source_class_acc': source_class_acc,
                'target_class_acc': target_class_acc,
                'target_class_encoder_acc': target_class_acc_
            }
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss_gen + loss_adv,
                                              eval_metric_ops=metrics)
        target_class_acc = utilities.non_streaming_accuracy(
            pred_classes_target, tf.cast(labels['label_t'], tf.int32))
        source_class_acc = utilities.non_streaming_accuracy(
            pred_classes_source, tf.cast(labels['label_s'], tf.int32))
        target_class_acc_enc = utilities.non_streaming_accuracy(
            pred_classes_target_, tf.cast(labels['label_t'], tf.int32))
        tf.identity(target_class_acc, name='target_class_acc')
        tf.identity(source_class_acc, name='source_class_acc')
        tf.identity(target_class_acc_enc, name='target_class_acc_enc')
        # Get the trainable variables
        var_target_encoder = tf.trainable_variables('target_encoder')
        var_discriminator = tf.trainable_variables('discriminator')
        print(var_target_encoder)
        print(var_discriminator)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate, 0.5)
        train_op_gen = optimizer.minimize(
            loss_gen,
            global_step=tf.train.get_global_step(),
            var_list=var_target_encoder)
        train_op_adv = optimizer.minimize(
            loss_adv,
            global_step=tf.train.get_global_step(),
            var_list=var_discriminator)
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss_gen + loss_adv,
                                          train_op=tf.group(
                                              train_op_gen, train_op_adv))
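The loss_gen term above uses the inverted-labels trick: the target encoder is trained so that the discriminator classifies its features as source (label 1), while loss_adv trains the discriminator with the true labels. A toy sketch of the two opposing terms for one target sample (the discriminator logits are hypothetical):

import tensorflow as tf

disc_logits_target = tf.constant([[2.0, -1.0]])  # hypothetical logits: discriminator is confident this is "target"
target_adv_label = tf.constant([0])              # true discriminator label for target samples

# Discriminator term: push towards the true label 0.
loss_disc_target = tf.losses.sparse_softmax_cross_entropy(
    labels=target_adv_label, logits=disc_logits_target)
# Generator term: inverted label 1, i.e. "make the target features look like source".
loss_gen = tf.losses.sparse_softmax_cross_entropy(
    labels=1 - target_adv_label, logits=disc_logits_target)

with tf.Session() as sess:
    print(sess.run([loss_disc_target, loss_gen]))  # roughly [0.05, 3.05]: the fooling term dominates here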
Code example #50
# Create logdir name
args.logdir = os.path.join(
    "logs", "{}-{}-{}".format(
        os.path.basename(__file__),
        datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
            ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
             for key, value in sorted(vars(args).items())))))

# Load data
uppercase_data = UppercaseData(args.window, args.alphabet_size)

model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=[2 * args.window + 1],
                               dtype=tf.int32),
    tf.keras.layers.Lambda(
        lambda x: tf.one_hot(x, len(uppercase_data.train.alphabet))),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1024, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(256, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(2)
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")],
)
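The Lambda layer above expands every character id in the input window into a one-hot vector before the Flatten/Dense stack. A tiny sketch of its effect on a single window (the alphabet size and ids are hypothetical):

import tensorflow as tf

alphabet_size = 5                                   # hypothetical; the real value comes from the dataset
one_hot_layer = tf.keras.layers.Lambda(lambda x: tf.one_hot(x, alphabet_size))
window = tf.constant([[0, 3, 1]], dtype=tf.int32)   # one window of three character ids
print(one_hot_layer(window).shape)                  # (1, 3, 5): each id becomes a length-5 one-hot row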
Code example #51
    def create_tf_operations(self, config):
        super(DQNModel, self).create_tf_operations(config)

        flat_action_sizes = {
            name: util.prod(action.shape) * action.num_actions
            for name, action in config.actions
        }
        action_shapes = {
            name: (-1, ) + action.shape + (action.num_actions, )
            for name, action in config.actions
        }

        # Training network
        with tf.variable_scope('training'):
            network_builder = util.get_function(fct=config.network)
            self.training_network = NeuralNetwork(
                network_builder=network_builder, inputs=self.state)
            self.internal_inputs.extend(self.training_network.internal_inputs)
            self.internal_outputs.extend(
                self.training_network.internal_outputs)
            self.internal_inits.extend(self.training_network.internal_inits)

            self.training_output = dict()
            for action in self.action:
                output = layers['linear'](x=self.training_network.output,
                                          size=flat_action_sizes[action])
                self.training_output[action] = tf.reshape(
                    tensor=output, shape=action_shapes[action])
                self.action_taken[action] = tf.argmax(
                    self.training_output[action], axis=-1)

        # Target network
        with tf.variable_scope('target'):
            network_builder = util.get_function(fct=config.network)
            self.target_network = NeuralNetwork(
                network_builder=network_builder, inputs=self.state)
            self.internal_inputs.extend(self.target_network.internal_inputs)
            self.internal_outputs.extend(self.target_network.internal_outputs)
            self.internal_inits.extend(self.target_network.internal_inits)

            target_value = dict()
            for action in self.action:
                output = layers['linear'](x=self.target_network.output,
                                          size=flat_action_sizes[action])
                output = tf.reshape(tensor=output, shape=action_shapes[action])
                if config.double_dqn:
                    selector = tf.one_hot(indices=self.action_taken[action],
                                          depth=action_shapes[action][1])
                    target_value[action] = tf.reduce_sum(
                        input_tensor=(output * selector), axis=-1)
                else:
                    target_value[action] = tf.reduce_max(input_tensor=output,
                                                         axis=-1)

        with tf.name_scope('update'):
            self.actions_one_hot = dict()
            self.q_values = dict()
            deltas = list()
            for action in self.action:
                # One_hot tensor of the actions that have been taken
                self.actions_one_hot[action] = tf.one_hot(
                    indices=self.action[action][:-1],
                    depth=config.actions[action].num_actions)

                # Training output, so we get the expected rewards given the actual states and actions
                self.q_values[action] = tf.reduce_sum(
                    input_tensor=(self.training_output[action][:-1] *
                                  self.actions_one_hot[action]),
                    axis=-1)

                reward = self.reward[:-1]
                terminal = tf.cast(x=self.terminal[:-1], dtype=tf.float32)
                for _ in range(len(config.actions[action].shape)):
                    reward = tf.expand_dims(input=reward, axis=1)
                    terminal = tf.expand_dims(input=terminal, axis=1)

                # Surrogate loss as the mean squared error between actual observed rewards and expected rewards
                q_target = reward + (
                    1.0 -
                    terminal) * config.discount * target_value[action][1:]
                delta = q_target - self.q_values[action]

                ds_list = [delta]
                for _ in range(len(config.actions[action].shape)):
                    ds_list = [
                        d for ds in ds_list
                        for d in tf.unstack(value=ds, axis=1)
                    ]
                deltas.extend(ds_list)

            delta = tf.add_n(inputs=deltas) / len(deltas)
            self.loss_per_instance = tf.square(delta)

            # If gradient clipping is used, calculate the huber loss
            if config.clip_loss > 0.0:
                huber_loss = tf.where(
                    condition=(tf.abs(delta) < config.clip_gradients),
                    x=(0.5 * self.loss_per_instance),
                    y=(tf.abs(delta) - 0.5))
                loss = tf.reduce_mean(input_tensor=huber_loss, axis=0)
            else:
                loss = tf.reduce_mean(input_tensor=self.loss_per_instance,
                                      axis=0)
            self.dqn_loss = loss
            tf.losses.add_loss(loss)

        # Update target network
        with tf.name_scope('update_target'):
            self.target_network_update = list()
            for v_source, v_target in zip(self.training_network.variables,
                                          self.target_network.variables):
                update = v_target.assign_sub(config.update_target_weight *
                                             (v_target - v_source))
                self.target_network_update.append(update)
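The update_target block above performs a soft (Polyak) update, v_target <- v_target - tau * (v_target - v_source). A minimal sketch of that rule on two scalar variables (tau plays the role of config.update_target_weight):

import tensorflow as tf

tau = 0.01
v_source = tf.Variable(1.0)
v_target = tf.Variable(0.0)
update = v_target.assign_sub(tau * (v_target - v_source))  # v_target <- (1 - tau) * v_target + tau * v_source

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(update))  # ~0.01, 0.0199, 0.0297: the target slowly tracks the source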
Code example #52
File: incep9_flat.py Project: formigone/ml-engine
def model_fn(features, labels, mode, params):
    x = tf.reshape(features, [-1, 16000], name='input_incep9_flat')
    x_norm = tf.layers.batch_normalization(x, training=mode == tf.estimator.ModeKeys.TRAIN, name='x_norm')
    x = get_spectrogram(x_norm, type='mel/pow')
    if params['verbose_summary']:
        tf.summary.image('input', x)

    conv1 = tf.layers.conv2d(x, filters=16, kernel_size=3, padding='same', activation=tf.nn.relu, name='conv1')
    conv1b = tf.layers.conv2d(conv1, filters=16, kernel_size=3, activation=tf.nn.relu, name='conv1b')
    pool1 = tf.layers.max_pooling2d(conv1b, pool_size=[2, 2], strides=2, name='pool1')
    if params['verbose_summary']:
        log_conv_kernel('conv1')
        log_conv_kernel('conv1b')
        tf.summary.image('pool1', pool1[:, :, :, 0:1])

    incep2 = inception_block(pool1, t1x1=8, t3x3=8, t5x5=8, tmp=8, name='incep2')

    conv3 = tf.layers.conv2d(incep2, filters=32, kernel_size=3, padding='same', activation=tf.nn.relu, name='conv3')
    conv3b = tf.layers.conv2d(conv3, filters=32, kernel_size=3, activation=tf.nn.relu, name='conv3b')
    pool3 = tf.layers.max_pooling2d(conv3b, pool_size=[2, 2], strides=2, name='pool3')
    if params['verbose_summary']:
        log_conv_kernel('conv3')
        log_conv_kernel('conv3b')
        tf.summary.image('pool3', pool3[:, :, :, 0:1])

    conv5 = tf.layers.conv2d(pool3, filters=64, kernel_size=3, padding='same', activation=tf.nn.relu, name='conv5')
    conv5b = tf.layers.conv2d(conv5, filters=64, kernel_size=3, activation=tf.nn.relu, name='conv5b')
    pool5 = tf.layers.max_pooling2d(conv5b, pool_size=[2, 2], strides=2, name='pool5')
    if params['verbose_summary']:
        log_conv_kernel('conv5')
        log_conv_kernel('conv5b')
        tf.summary.image('pool5', pool5[:, :, :, 0:1])

    incep6 = inception_block(pool5, t1x1=32, t3x3=32, t5x5=32, tmp=32, name='incep6')

    conv7 = tf.layers.conv2d(incep6, filters=128, kernel_size=3, padding='same', activation=tf.nn.relu, name='conv7')
    conv7b = tf.layers.conv2d(conv7, filters=128, kernel_size=3, activation=tf.nn.relu, name='conv7b')
    pool7 = tf.layers.max_pooling2d(conv7b, pool_size=[2, 2], strides=2, name='pool7')
    if params['verbose_summary']:
        log_conv_kernel('conv7')
        log_conv_kernel('conv7b')
        tf.summary.image('pool7', pool7[:, :, :, 0:1])

    conv8 = tf.layers.conv2d(pool7, filters=256, kernel_size=3, padding='same', activation=tf.nn.relu, name='conv8')
    conv8b = tf.layers.conv2d(conv8, filters=256, kernel_size=3, activation=tf.nn.relu, name='conv8b')
    pool8 = tf.layers.max_pooling2d(conv8b, pool_size=[2, 2], strides=2, name='pool8')
    if params['verbose_summary']:
        log_conv_kernel('conv8')
        log_conv_kernel('conv8b')
        tf.summary.image('pool8', pool8[:, :, :, 0:1])

    conv9 = tf.layers.conv2d(pool8, filters=512, kernel_size=3, padding='same', activation=tf.nn.relu, name='conv9')
    conv9b = tf.layers.conv2d(conv9, filters=512, kernel_size=3, activation=tf.nn.relu, name='conv9b')
    pool9 = tf.layers.max_pooling2d(conv9b, pool_size=[2, 2], strides=2, name='pool9')
    if params['verbose_summary']:
        log_conv_kernel('conv9')
        log_conv_kernel('conv9b')
        tf.summary.image('pool9', pool9[:, :, :, 0:1])

    flat = flatten(pool9)
    dropout4 = tf.layers.dropout(flat, rate=params['dropout_rate'], training=mode == tf.estimator.ModeKeys.TRAIN, name='dropout4')
    dense4 = tf.layers.dense(dropout4, units=2048, activation=tf.nn.relu, name='dense4')

    logits = tf.layers.dense(dense4, units=params['num_classes'], name='logits')

    predictions = {
        'classes': tf.argmax(logits, axis=1, name='prediction_classes'),
        'probabilities': tf.nn.softmax(logits, name='prediction_softmax')
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions={'predictions': predictions['probabilities']})

    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=params['num_classes'], name='onehot_labels')
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    tf.summary.scalar('loss', loss)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=params['learning_rate'])
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
    }

    tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])

    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops
    )
Code example #53
    def __init__(self, vocab_size, batch_size, embed_size, hidden_size,
                 sequence_length, start_token, learning_rate, reward_gamma):
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.sequence_length = sequence_length
        self.start_token = tf.constant([start_token] * self.batch_size,
                                       dtype=tf.int32)
        self.learning_rate = tf.Variable(learning_rate,
                                         dtype=tf.float32,
                                         trainable=False)
        self.reward_gamma = reward_gamma
        self.g_params = []
        self.grad_clip = 5.0
        self.expected_reward = tf.Variable(tf.zeros([self.sequence_length]))

        with tf.variable_scope('generator'):
            self.g_embeddings = tf.Variable(
                self.init_matrix([self.vocab_size, self.embed_size]))
            self.g_params.append(
                self.g_embeddings)  # shape = [1, vocab_size, emb_size]
            self.g_lstm_forward = self.recurrent_lstm_forward(self.g_params)
            self.g_linear_forward = self.recurrent_linear_forward(
                self.g_params)

        # Initialize parameters ------------------

        # placeholder
        self.x = tf.placeholder(tf.int32,
                                shape=[self.batch_size, self.sequence_length])
        # rewards shape[1] = self.sequence_length comes from Monte-Carlo Search
        self.rewards = tf.placeholder(
            tf.float32, shape=[self.batch_size, self.sequence_length])

        # processed for batch(Real datasets)
        with tf.device("/cpu:0"):
            self.processed_x = tf.transpose(
                tf.nn.embedding_lookup(self.g_embeddings, self.x),
                perm=[1, 0, 2])  # shape=[seq_length, batch_size, emb_size]

        # Init hidden state
        self.h0 = tf.zeros([self.batch_size, self.hidden_size])
        self.h0 = tf.stack([self.h0, self.h0])  # hidden_state + cell

        # input sequence is an array of tokens while output sequence is an array of probabilities
        output_prob_sequence = tensor_array_ops.TensorArray(
            dtype=tf.float32,
            size=self.sequence_length,
            dynamic_size=False,
            infer_shape=True)
        token_sequence = tensor_array_ops.TensorArray(
            dtype=tf.int32,
            size=self.sequence_length,
            dynamic_size=False,
            infer_shape=True)

        # End initialize ------------------

        # Forward step -------------------

        def _g_recurrence(i, x_t, h_tm, gen_o, gen_x):
            h_t = self.g_lstm_forward(x_t, h_tm)  # hidden_memory
            o_t = self.g_linear_forward(
                h_t)  # output of prob, shape = [batch_size, vocab_size]
            log_prob = tf.log(o_t)
            next_token = tf.cast(
                tf.reshape(tf.multinomial(log_prob, 1),
                           [self.batch_size]), tf.int32
            )  # using this softmax distribution to choose one token as next
            x_ = tf.nn.embedding_lookup(self.g_embeddings, next_token)
            gen_o = gen_o.write(
                i,
                tf.reduce_sum(
                    tf.multiply(
                        tf.one_hot(next_token, self.vocab_size, 1.0, 0.0),
                        o_t), 1))
            gen_x = gen_x.write(
                i, next_token
            )  # output_array, shape = [index_num(seq_length), batch_size]
            return i + 1, x_, h_t, gen_o, gen_x

        _, _, _, self.output_prob_sequence, self.token_sequence = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, _3, _4: i < self.sequence_length,
            body=_g_recurrence,
            loop_vars=(tf.constant(0, dtype=tf.int32),
                       tf.nn.embedding_lookup(self.g_embeddings,
                                              self.start_token), self.h0,
                       output_prob_sequence, token_sequence))

        self.token_sequence = self.token_sequence.stack(
        )  # shape = [sequence_length * batch_size]
        self.token_sequence = tf.transpose(
            self.token_sequence,
            perm=[1, 0])  # shape = [batch_size * sequence_length]

        # End Forward step ----------------------

        # Pre-train step -------------------------

        # Supervised pre-training for generator
        g_predictions = tensor_array_ops.TensorArray(dtype=tf.float32,
                                                     size=self.sequence_length,
                                                     dynamic_size=False,
                                                     infer_shape=True)

        # Real-data result of sequence
        ta_embed_x = tensor_array_ops.TensorArray(dtype=tf.float32,
                                                  size=self.sequence_length,
                                                  dynamic_size=False,
                                                  infer_shape=True)
        ta_embed_x = ta_embed_x.unstack(
            self.processed_x)  # Gain real data's token embedding

        def _pretrain_recurrence(i, x_t, h_tm, g_predictions):
            h_t = self.g_lstm_forward(x_t, h_tm)
            o_t = self.g_linear_forward(h_t)
            g_predictions = g_predictions.write(
                i,
                o_t)  # softmax_distribution, shape = [batch_size, vocab_size]
            x_ = ta_embed_x.read(
                i)  # read the next_token from real datasets with index i
            return i + 1, x_, h_t, g_predictions

        _, _, _, self.g_predictions = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, _3: i < self.sequence_length,
            body=_pretrain_recurrence,
            loop_vars=(tf.constant(0, dtype=tf.int32),
                       tf.nn.embedding_lookup(self.g_embeddings,
                                              self.start_token), self.h0,
                       g_predictions))

        self.g_predictions = self.g_predictions.stack()
        # g_predictions is a softmax distribution array
        self.g_predictions = tf.transpose(
            self.g_predictions,
            perm=[1, 0, 2])  # shape = [batch_size, seq_length, vocab_size]

        # End pre-train step -------------------

        # Pre-train Compile configuration ----------------

        # pre-train_loss
        self.loss = -tf.reduce_sum(
            tf.one_hot(tf.cast(tf.reshape(self.x, [-1]), tf.int32),
                       self.vocab_size, 1.0, 0.0) *
            tf.log(
                tf.clip_by_value(
                    tf.reshape(self.g_predictions, [-1, self.vocab_size]),
                    1e-20, 1.0))) / (
                        self.sequence_length * self.batch_size
                    )  # one_hot shape = [seq_length * batch_size, vocab_size]

        # pre-train_backward
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)

        # End Setting Pre-train Compile ------------------

        # Training pre-train model ------------------

        self.pretrain_grad, _ = tf.clip_by_global_norm(
            tf.gradients(self.loss, self.g_params),
            self.grad_clip)  # sum(list(g_params / loss))
        self.pretrain_updates = self.optimizer.apply_gradients(
            zip(self.pretrain_grad, self.g_params))

        # End training pre-train --------------------

        # Adversarial Learning train ----------------

        # output is a number represents the whole reward of sequence_tokens
        self.g_loss = -tf.reduce_sum(
            tf.reduce_sum(
                tf.one_hot(tf.cast(tf.reshape(self.x, [-1]), tf.int32),
                           self.vocab_size, 1.0, 0.0) *
                tf.log(
                    tf.clip_by_value(
                        tf.reshape(self.g_predictions, [-1, self.vocab_size]),
                        1e-20, 1.0)), 1) * tf.reshape(self.rewards, [-1])
        )  # Adversarial Learning with rewards, [seq_length * batch_size] * (sum of prob)
        # ==> sum([seq_length * batch_size] * (sum of prob) * (rewards))

        self.g_optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.g_gradicts, _ = tf.clip_by_global_norm(
            tf.gradients(self.g_loss, self.g_params), self.grad_clip)
        self.g_updates = self.g_optimizer.apply_gradients(
            zip(self.g_gradicts, self.g_params))
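The next-token sampling in _g_recurrence draws from the model's softmax distribution by passing log-probabilities to tf.multinomial. A tiny sketch of that sampling step (toy distribution for a single batch element):

import tensorflow as tf

probs = tf.constant([[0.1, 0.7, 0.2]])  # toy o_t: output distribution over a 3-token vocabulary
next_token = tf.cast(
    tf.reshape(tf.multinomial(tf.log(probs), 1), [1]), tf.int32)

with tf.Session() as sess:
    print(sess.run(next_token))  # most often [1], since token 1 carries 70% of the mass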
Code example #54
File: BinaryDbReader.py Project: dedoogong/asrada
    def get(self):
        """ Provides input data to the graph. """
        # calculate size of each record (this lists what is contained in the db and how many bytes are occupied)
        record_bytes = 2

        encoding_bytes = 4
        kp_xyz_entries = 3 * self.num_kp
        record_bytes += encoding_bytes*kp_xyz_entries

        encoding_bytes = 4
        kp_uv_entries = 2 * self.num_kp
        record_bytes += encoding_bytes*kp_uv_entries

        cam_matrix_entries = 9
        record_bytes += encoding_bytes*cam_matrix_entries

        image_bytes = self.image_size[0] * self.image_size[1] * 3
        record_bytes += image_bytes

        hand_parts_bytes = self.image_size[0] * self.image_size[1]
        record_bytes += hand_parts_bytes

        kp_vis_bytes = self.num_kp
        record_bytes += kp_vis_bytes

        """ READ DATA ITEMS"""
        # Start reader
        reader = tf.FixedLengthRecordReader(header_bytes=0, record_bytes=record_bytes)
        _, value = reader.read(tf.train.string_input_producer([self.path_to_db]))

        # decode to floats
        bytes_read = 0
        data_dict = dict()
        record_bytes_float32 = tf.decode_raw(value, tf.float32)

        # 1. Read keypoint xyz
        keypoint_xyz = tf.reshape(tf.slice(record_bytes_float32, [bytes_read//4], [kp_xyz_entries]), [self.num_kp, 3])
        bytes_read += encoding_bytes*kp_xyz_entries

        # calculate palm coord
        if not self.use_wrist_coord:
            palm_coord_l = tf.expand_dims(0.5*(keypoint_xyz[0, :] + keypoint_xyz[12, :]), 0)
            palm_coord_r = tf.expand_dims(0.5*(keypoint_xyz[21, :] + keypoint_xyz[33, :]), 0)
            keypoint_xyz = tf.concat([palm_coord_l, keypoint_xyz[1:21, :], palm_coord_r, keypoint_xyz[-20:, :]], 0)

        data_dict['keypoint_xyz'] = keypoint_xyz

        # 2. Read keypoint uv
        keypoint_uv = tf.cast(tf.reshape(tf.slice(record_bytes_float32, [bytes_read//4], [kp_uv_entries]), [self.num_kp, 2]), tf.int32)
        bytes_read += encoding_bytes*kp_uv_entries

        keypoint_uv = tf.cast(keypoint_uv, tf.float32)

        # calculate palm coord
        if not self.use_wrist_coord:
            palm_coord_uv_l = tf.expand_dims(0.5*(keypoint_uv[0, :] + keypoint_uv[12, :]), 0)
            palm_coord_uv_r = tf.expand_dims(0.5*(keypoint_uv[21, :] + keypoint_uv[33, :]), 0)
            keypoint_uv = tf.concat([palm_coord_uv_l, keypoint_uv[1:21, :], palm_coord_uv_r, keypoint_uv[-20:, :]], 0)

        if self.coord_uv_noise:
            noise = tf.truncated_normal([42, 2], mean=0.0, stddev=self.coord_uv_noise_sigma)
            keypoint_uv += noise

        data_dict['keypoint_uv'] = keypoint_uv

        # 3. Camera intrinsics
        cam_mat = tf.reshape(tf.slice(record_bytes_float32, [bytes_read//4], [cam_matrix_entries]), [3, 3])
        bytes_read += encoding_bytes*cam_matrix_entries
        data_dict['cam_mat'] = cam_mat

        # decode to uint8
        bytes_read += 2
        record_bytes_uint8 = tf.decode_raw(value, tf.uint8)

        # 4. Read image
        image = tf.reshape(tf.slice(record_bytes_uint8, [bytes_read], [image_bytes]),
                               [self.image_size[0], self.image_size[1], 3])
        image = tf.cast(image, tf.float32)
        bytes_read += image_bytes

        # subtract mean
        image = image / 255.0 - 0.5
        if self.hue_aug:
            image = tf.image.random_hue(image, self.hue_aug_max)
        data_dict['image'] = image

        # 5. Read mask
        hand_parts_mask = tf.reshape(tf.slice(record_bytes_uint8, [bytes_read], [hand_parts_bytes]),
                               [self.image_size[0], self.image_size[1]])
        hand_parts_mask = tf.cast(hand_parts_mask, tf.int32)
        bytes_read += hand_parts_bytes
        data_dict['hand_parts'] = hand_parts_mask
        hand_mask = tf.greater(hand_parts_mask, 1)
        bg_mask = tf.logical_not(hand_mask)
        data_dict['hand_mask'] = tf.cast(tf.stack([bg_mask, hand_mask], 2), tf.int32)

        # 6. Read visibility
        keypoint_vis = tf.reshape(tf.slice(record_bytes_uint8, [bytes_read], [kp_vis_bytes]),
                               [self.num_kp])
        keypoint_vis = tf.cast(keypoint_vis, tf.bool)
        bytes_read += kp_vis_bytes

        # calculate palm visibility
        if not self.use_wrist_coord:
            palm_vis_l = tf.expand_dims(tf.logical_or(keypoint_vis[0], keypoint_vis[12]), 0)
            palm_vis_r = tf.expand_dims(tf.logical_or(keypoint_vis[21], keypoint_vis[33]), 0)
            keypoint_vis = tf.concat([palm_vis_l, keypoint_vis[1:21], palm_vis_r, keypoint_vis[-20:]], 0)
        data_dict['keypoint_vis'] = keypoint_vis

        assert bytes_read == record_bytes, "Doesn't add up."

        """ DEPENDENT DATA ITEMS: SUBSET of 21 keypoints"""
        # figure out dominant hand by analysis of the segmentation mask
        one_map, zero_map = tf.ones_like(hand_parts_mask), tf.zeros_like(hand_parts_mask)
        cond_l = tf.logical_and(tf.greater(hand_parts_mask, one_map), tf.less(hand_parts_mask, one_map*18))
        cond_r = tf.greater(hand_parts_mask, one_map*17)
        hand_map_l = tf.where(cond_l, one_map, zero_map)
        hand_map_r = tf.where(cond_r, one_map, zero_map)
        num_px_left_hand = tf.reduce_sum(hand_map_l)
        num_px_right_hand = tf.reduce_sum(hand_map_r)

        # PRODUCE the 21 subset using the segmentation masks
        # We only deal with the more prominent hand for each frame and discard the second set of keypoints
        kp_coord_xyz_left = keypoint_xyz[:21, :]
        kp_coord_xyz_right = keypoint_xyz[-21:, :]

        cond_left = tf.logical_and(tf.cast(tf.ones_like(kp_coord_xyz_left), tf.bool), tf.greater(num_px_left_hand, num_px_right_hand))
        kp_coord_xyz21 = tf.where(cond_left, kp_coord_xyz_left, kp_coord_xyz_right)

        hand_side = tf.where(tf.greater(num_px_left_hand, num_px_right_hand),
                             tf.constant(0, dtype=tf.int32),
                             tf.constant(1, dtype=tf.int32))  # left hand = 0; right hand = 1
        data_dict['hand_side'] = tf.one_hot(hand_side, depth=2, on_value=1.0, off_value=0.0, dtype=tf.float32)

        data_dict['keypoint_xyz21'] = kp_coord_xyz21

        # make coords relative to root joint
        kp_coord_xyz_root = kp_coord_xyz21[0, :] # this is the palm coord
        kp_coord_xyz21_rel = kp_coord_xyz21 - kp_coord_xyz_root  # relative coords in metric coords
        index_root_bone_length = tf.sqrt(tf.reduce_sum(tf.square(kp_coord_xyz21_rel[12, :] - kp_coord_xyz21_rel[11, :])))
        data_dict['keypoint_scale'] = index_root_bone_length
        data_dict['keypoint_xyz21_normed'] = kp_coord_xyz21_rel / index_root_bone_length  # normalized by length of 12->11

        # calculate local coordinates
        kp_coord_xyz21_local = bone_rel_trafo(data_dict['keypoint_xyz21_normed'])
        kp_coord_xyz21_local = tf.squeeze(kp_coord_xyz21_local)
        data_dict['keypoint_xyz21_local'] = kp_coord_xyz21_local

        # calculate viewpoint and coords in canonical coordinates
        kp_coord_xyz21_rel_can, rot_mat = canonical_trafo(data_dict['keypoint_xyz21_normed'])
        kp_coord_xyz21_rel_can, rot_mat = tf.squeeze(kp_coord_xyz21_rel_can), tf.squeeze(rot_mat)
        kp_coord_xyz21_rel_can = flip_right_hand(kp_coord_xyz21_rel_can, tf.logical_not(cond_left))
        data_dict['keypoint_xyz21_can'] = kp_coord_xyz21_rel_can
        data_dict['rot_mat'] = tf.matrix_inverse(rot_mat)

        # Set of 21 for visibility
        keypoint_vis_left = keypoint_vis[:21]
        keypoint_vis_right = keypoint_vis[-21:]
        keypoint_vis21 = tf.where(cond_left[:, 0], keypoint_vis_left, keypoint_vis_right)
        data_dict['keypoint_vis21'] = keypoint_vis21

        # Set of 21 for UV coordinates
        keypoint_uv_left = keypoint_uv[:21, :]
        keypoint_uv_right = keypoint_uv[-21:, :]
        keypoint_uv21 = tf.where(cond_left[:, :2], keypoint_uv_left, keypoint_uv_right)
        data_dict['keypoint_uv21'] = keypoint_uv21

        """ DEPENDENT DATA ITEMS: HAND CROP """
        if self.hand_crop:
            crop_center = keypoint_uv21[12, ::-1]

            # catch problem, when no valid kp available (happens almost never)
            crop_center = tf.cond(tf.reduce_all(tf.is_finite(crop_center)), lambda: crop_center,
                                  lambda: tf.constant([0.0, 0.0]))
            crop_center.set_shape([2, ])

            if self.crop_center_noise:
                noise = tf.truncated_normal([2], mean=0.0, stddev=self.crop_center_noise_sigma)
                crop_center += noise

            crop_scale_noise = tf.constant(1.0)
            if self.crop_scale_noise:
                    crop_scale_noise = tf.squeeze(tf.random_uniform([1], minval=1.0, maxval=1.2))

            # select visible coords only
            kp_coord_h = tf.boolean_mask(keypoint_uv21[:, 1], keypoint_vis21)
            kp_coord_w = tf.boolean_mask(keypoint_uv21[:, 0], keypoint_vis21)
            kp_coord_hw = tf.stack([kp_coord_h, kp_coord_w], 1)

            # determine size of crop (measure spatial extent of hw coords first)
            min_coord = tf.maximum(tf.reduce_min(kp_coord_hw, 0), 0.0)
            max_coord = tf.minimum(tf.reduce_max(kp_coord_hw, 0), self.image_size)

            # find out larger distance wrt the center of crop
            crop_size_best = 2*tf.maximum(max_coord - crop_center, crop_center - min_coord)
            crop_size_best = tf.reduce_max(crop_size_best)
            crop_size_best = tf.minimum(tf.maximum(crop_size_best, 50.0), 500.0)

            # catch problem, when no valid kp available
            crop_size_best = tf.cond(tf.reduce_all(tf.is_finite(crop_size_best)), lambda: crop_size_best,
                                  lambda: tf.constant(200.0))
            crop_size_best.set_shape([])

            # calculate necessary scaling
            scale = tf.cast(self.crop_size, tf.float32) / crop_size_best
            scale = tf.minimum(tf.maximum(scale, 1.0), 10.0)
            scale *= crop_scale_noise
            data_dict['crop_scale'] = scale

            if self.crop_offset_noise:
                noise = tf.truncated_normal([2], mean=0.0, stddev=self.crop_offset_noise_sigma)
                crop_center += noise

            # Crop image
            img_crop = crop_image_from_xy(tf.expand_dims(image, 0), crop_center, self.crop_size, scale)
            data_dict['image_crop'] = tf.squeeze(img_crop)

            # Modify uv21 coordinates
            crop_center_float = tf.cast(crop_center, tf.float32)
            keypoint_uv21_u = (keypoint_uv21[:, 0] - crop_center_float[1]) * scale + self.crop_size // 2
            keypoint_uv21_v = (keypoint_uv21[:, 1] - crop_center_float[0]) * scale + self.crop_size // 2
            keypoint_uv21 = tf.stack([keypoint_uv21_u, keypoint_uv21_v], 1)
            data_dict['keypoint_uv21'] = keypoint_uv21

            # Modify camera intrinsics
            scale = tf.reshape(scale, [1, ])
            scale_matrix = tf.dynamic_stitch([[0], [1], [2],
                                              [3], [4], [5],
                                              [6], [7], [8]], [scale, [0.0], [0.0],
                                                               [0.0], scale, [0.0],
                                                               [0.0], [0.0], [1.0]])
            scale_matrix = tf.reshape(scale_matrix, [3, 3])

            crop_center_float = tf.cast(crop_center, tf.float32)
            trans1 = crop_center_float[0] * scale - self.crop_size // 2
            trans2 = crop_center_float[1] * scale - self.crop_size // 2
            trans1 = tf.reshape(trans1, [1, ])
            trans2 = tf.reshape(trans2, [1, ])
            trans_matrix = tf.dynamic_stitch([[0], [1], [2],
                                              [3], [4], [5],
                                              [6], [7], [8]], [[1.0], [0.0], -trans2,
                                                               [0.0], [1.0], -trans1,
                                                               [0.0], [0.0], [1.0]])
            trans_matrix = tf.reshape(trans_matrix, [3, 3])

            data_dict['cam_mat'] = tf.matmul(trans_matrix, tf.matmul(scale_matrix, cam_mat))

        """ DEPENDENT DATA ITEMS: Scoremap from the SUBSET of 21 keypoints"""
        # create scoremaps from the subset of 2D annotations
        keypoint_hw21 = tf.stack([keypoint_uv21[:, 1], keypoint_uv21[:, 0]], -1)

        scoremap_size = self.image_size
        
        if self.hand_crop:
            scoremap_size = (self.crop_size, self.crop_size)

        scoremap = self.create_multiple_gaussian_map(keypoint_hw21,
                                                     scoremap_size,
                                                     self.sigma,
                                                     valid_vec=keypoint_vis21)
        
        if self.scoremap_dropout:
            scoremap = tf.nn.dropout(scoremap, self.scoremap_dropout_prob,
                                        noise_shape=[1, 1, 21])
            scoremap *= self.scoremap_dropout_prob

        data_dict['scoremap'] = scoremap

        if self.scale_to_size:
            image, keypoint_uv21, keypoint_vis21 = data_dict['image'], data_dict['keypoint_uv21'], data_dict['keypoint_vis21']
            s = image.get_shape().as_list()
            image = tf.image.resize_images(image, self.scale_target_size)
            scale = (self.scale_target_size[0]/float(s[0]), self.scale_target_size[1]/float(s[1]))
            keypoint_uv21 = tf.stack([keypoint_uv21[:, 0] * scale[1],
                                      keypoint_uv21[:, 1] * scale[0]], 1)

            data_dict = dict()  # delete everything else because the scaling makes the data invalid anyway
            data_dict['image'] = image
            data_dict['keypoint_uv21'] = keypoint_uv21
            data_dict['keypoint_vis21'] = keypoint_vis21

        elif self.random_crop_to_size:
            tensor_stack = tf.concat([data_dict['image'],
                                      tf.expand_dims(tf.cast(data_dict['hand_parts'], tf.float32), -1),
                                      tf.cast(data_dict['hand_mask'], tf.float32)], 2)
            s = tensor_stack.get_shape().as_list()
            tensor_stack_cropped = tf.random_crop(tensor_stack,
                                                  [self.random_crop_size, self.random_crop_size, s[2]])
            data_dict = dict()  # delete everything else because the random cropping makes the data invalid anyway
            data_dict['image'], data_dict['hand_parts'], data_dict['hand_mask'] = tensor_stack_cropped[:, :, :3],\
                                                                                  tf.cast(tensor_stack_cropped[:, :, 3], tf.int32),\
                                                                                  tf.cast(tensor_stack_cropped[:, :, 4:], tf.int32)

        names, tensors = zip(*data_dict.items())

        if self.shuffle:
            tensors = tf.train.shuffle_batch_join([tensors],
                                                  batch_size=self.batch_size,
                                                  capacity=100,
                                                  min_after_dequeue=50,
                                                  enqueue_many=False)
        else:
            tensors = tf.train.batch_join([tensors],
                                          batch_size=self.batch_size,
                                          capacity=100,
                                          enqueue_many=False)

        return dict(zip(names, tensors))
Code example #55
# We need an operation to copy the online DQN to the target DQN
copy_ops = [
    target_var.assign(online_vars[var_name])
    for var_name, target_var in target_vars.items()
]
copy_online_to_target = tf.group(*copy_ops)

# Now for the training operations
learning_rate = 0.001
momentum = 0.95

with tf.variable_scope("train"):
    X_action = tf.placeholder(tf.int32, shape=[None])
    y = tf.placeholder(tf.float32, shape=[None, 1])
    q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
                            axis=1,
                            keep_dims=True)
    error = tf.abs(y - q_value)
    clipped_error = tf.clip_by_value(error, 0.0, 1.0)
    linear_error = 2 * (error - clipped_error)
    loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)

    global_step = tf.Variable(0, trainable=False, name='global_step')
    optimizer = tf.train.MomentumOptimizer(learning_rate,
                                           momentum,
                                           use_nesterov=True)
    training_op = optimizer.minimize(loss, global_step=global_step)

init = tf.global_variables_initializer()
saver = tf.train.Saver()
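# Hedged sketch (assumption, not from the original source): a minimal loop wiring the
# ops above together. `X_state`, `target_q_values`, `sample_memories`, `batch_size`,
# `discount_rate`, `n_steps` and `copy_steps` stand in for pieces defined elsewhere.
import numpy as np

with tf.Session() as sess:
    init.run()
    copy_online_to_target.run()          # start with identical online/target networks
    for step in range(n_steps):
        # ... play one step in the environment and store the transition ...
        X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size)
        # Targets come from the (periodically frozen) target network
        next_q_values = target_q_values.eval(feed_dict={X_state: X_next_state_val})
        max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
        y_val = rewards + continues * discount_rate * max_next_q_values
        # Train the online network towards the targets
        sess.run(training_op, feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
        # Periodically refresh the target network
        if step % copy_steps == 0:
            copy_online_to_target.run()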
コード例 #56
0
def main(_):
    network = importlib.import_module(FLAGS.model_def)
    def read_data_10_clips(input_queue, clip_length=FLAGS.clip_length):
        label = input_queue[1]
        filename = input_queue[0]
        filename = filename + '.npy'
        file_contents = tf.read_file(filename)
        file_contents = tf.decode_raw(file_contents, out_type=tf.float32)[32:]
        num_frames = tf.shape(file_contents)[0] // (FLAGS.crop_size * FLAGS.crop_size * 3)  # integer frame count
        clip = tf.reshape(file_contents, [num_frames, FLAGS.crop_size, FLAGS.crop_size, 3])
        clip = tf.image.resize_images(clip, [128, 171])
        clip = tf.image.crop_to_bounding_box(clip, 8, 30, 112, 112)
        each_start = (num_frames - clip_length) // 10
        clips = []
        for i in range(10):
            begin = i * each_start
            clips.append(tf.slice(clip, [begin, 0, 0, 0], [clip_length, -1, -1, -1]))
        clips = tf.stack(clips)           
        return clips, label

    def test_data_loader():
        lines = open(FLAGS.test_list_path, 'r')
        lines = list(lines)
        lines = [line.strip('\n').split() for line in lines]
        clips = [line[0] for line in lines]
        labels = [int(line[1]) for line in lines]
        clips = tf.convert_to_tensor(clips, dtype=tf.string)
        labels = tf.convert_to_tensor(labels, dtype=tf.int32)
        input_queue = tf.train.slice_input_producer([clips, labels], shuffle=True)
        clip, label = read_data_10_clips(input_queue)
        clip_batch, label_batch = tf.train.batch([clip, label], batch_size=FLAGS.batch_size, 
            num_threads=12,
            shapes=[(10, FLAGS.clip_length, FLAGS.crop_size, FLAGS.crop_size, 3), ()],
            capacity=25*FLAGS.batch_size,
            allow_smaller_final_batch=False,
        )
        return clip_batch, label_batch
    
    batch_clips, batch_labels = test_data_loader()
    ori_labels = batch_labels
    batch_labels = tf.one_hot(batch_labels, FLAGS.num_classes)
    batch_clips = tf.reshape(batch_clips, [FLAGS.batch_size * 10, FLAGS.clip_length, FLAGS.crop_size, FLAGS.crop_size, 3])

    batch_clips = batch_clips / 127.5 - 1

    feature, logits = network.R2Plus1DNet(batch_clips, layer_sizes = [1,1,1,1], training = False, num_classes = FLAGS.num_classes, weight_decay=5e-4)


    with tf.name_scope('accuracy'):
        logits = tf.reshape(logits, [FLAGS.batch_size, 10, -1])
        logits = tf.reduce_sum(logits, axis=1)
        predictions = tf.argmax(logits, 1)
        gt = ori_labels
        accuracy = tf.metrics.accuracy(predictions=predictions, labels=gt)
        tf.summary.scalar('accuracy', accuracy[1])  # tf.metrics.accuracy returns (value, update_op)

    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_labels))
        tf.summary.scalar('entropy_loss', loss)


    num_examples = get_dataset_size()
    batch_size = FLAGS.batch_size
    num_batches = math.ceil(num_examples / float(batch_size))
    num_batches = int(num_batches)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    #v2r =  tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="R2Plus1DNet")
    v2r = tf.global_variables()  # tf.all_variables() is deprecated
    print(v2r)
    restorer = tf.train.Saver(v2r)
    print('v2r:%d'%len(v2r))
    with tf.train.MonitoredTrainingSession(config=config) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)
        #restorer.restore(sess, FLAGS.checkpoint_save_path + '/model.ckpt-39000')
        #print('restore from {}' % FLAGS.checkpoint_save_path + '/model.ckpt-100000')
        acc = 0
        for i in range(num_batches):
            ac, ls, lb, pred = sess.run([accuracy, loss, ori_labels, predictions])
            print(ac)
            print(lb)
            print(pred)
            acc = acc + ac[1]
            if i % 10 == 0:
                print('[%d/%d]\tTime %s\tLoss %s\tAcc %2.3f' %
                      (i, num_batches, time.strftime('%Y-%m-%d %X', time.localtime()), ls, acc / (i + 1)))
                sys.stdout.flush()
        print("ACCURACY: %2.3f" % (acc / num_batches))
        sys.stdout.flush()
        coord.request_stop()
        coord.join(threads)
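# Hedged sketch: get_dataset_size() is called above but not shown in this snippet.
# A plausible implementation simply counts the entries in the test list file.
def get_dataset_size():
    with open(FLAGS.test_list_path, 'r') as f:
        return len([line for line in f if line.strip()])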
コード例 #57
0
            batch.append([c2i(inp), c2i(target)])
        epoch_data.append(batch)
    return epoch_data


epoch_data = gen_epoch_data(data, batch_size)
init_state = tf.zeros([hidden_size, 1])

# Input
x = tf.placeholder(tf.int32, shape=(seq_length), name="x")
y = tf.placeholder(tf.int32, shape=(seq_length), name="y")
state = tf.zeros([hidden_size, 1])

# One Hot representation of the input
x_oh = tf.one_hot(indices=x, depth=vocab_size)
y_oh = tf.one_hot(indices=y, depth=vocab_size)

rnn_inputs = tf.unstack(x_oh)
rnn_targets = tf.unstack(y_oh)

# Setup the weights and biases.
with tf.variable_scope('rnn_cell'):
    Wxh = tf.get_variable('Wxh', [hidden_size, vocab_size])
    Whh = tf.get_variable('Whh', [hidden_size, hidden_size])
    Why = tf.get_variable('Why', [vocab_size, hidden_size])
    bh = tf.get_variable('bh', [hidden_size, 1])
    by = tf.get_variable('by', [vocab_size, 1])

# Actual math behind computing the output and the next state of the RNN.
def rnn_cell(rnn_input, cur_state):
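    # Hedged reconstruction (assumption): the original body is cut off here. With the
    # weights declared in the 'rnn_cell' scope above, a vanilla char-RNN step would
    # typically look like this (min-char-rnn-style math); the return signature is a guess.
    with tf.variable_scope('rnn_cell', reuse=True):
        Wxh = tf.get_variable('Wxh', [hidden_size, vocab_size])
        Whh = tf.get_variable('Whh', [hidden_size, hidden_size])
        Why = tf.get_variable('Why', [vocab_size, hidden_size])
        bh = tf.get_variable('bh', [hidden_size, 1])
        by = tf.get_variable('by', [vocab_size, 1])
    x_col = tf.reshape(rnn_input, [vocab_size, 1])      # one-hot input as a column vector
    next_state = tf.tanh(tf.matmul(Wxh, x_col) + tf.matmul(Whh, cur_state) + bh)
    logits = tf.matmul(Why, next_state) + by             # unnormalized output scores
    return logits, next_state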
コード例 #58
0
def main():
    if FLAGS.job_name is None or FLAGS.job_name == '':
        raise ValueError('Must specify an explicit job_name !')
    else:
        print('job_name : %s' % FLAGS.job_name)
    if FLAGS.task_index is None or FLAGS.task_index == '':
        raise ValueError('Must specify an explicit task_index!')
    else:
        print('task_index : %d' % FLAGS.task_index)

    ps_spec = FLAGS.ps_hosts.split(',')
    worker_spec = FLAGS.worker_hosts.split(',')

    # Create the cluster
    cluster = tf.train.ClusterSpec({'ps': ps_spec, 'worker': worker_spec})
    server = tf.train.Server(cluster,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index)
    if FLAGS.job_name == 'ps':
        server.join()

    is_chief = (FLAGS.task_index == 0)
    # worker_device = '/job:worker/task%d/cpu:0' % FLAGS.task_index
    train_reader = Cifar10Reader.Reader([
        'cifar-10-python\\cifar-10-batches-py\\data_batch_1',
        'cifar-10-python\\cifar-10-batches-py\\data_batch_2',
        'cifar-10-python\\cifar-10-batches-py\\data_batch_3',
        'cifar-10-python\\cifar-10-batches-py\\data_batch_4',
        'cifar-10-python\\cifar-10-batches-py\\data_batch_5'
    ])
    if is_chief is True:
        test_reader = Cifar10Reader.Reader(
            ['cifar-10-python\\cifar-10-batches-py\\test_batch'])
    with tf.device(tf.train.replica_device_setter(cluster=cluster)):
        # step
        global_step = tf.Variable(0, name='global_step',
                                  trainable=False)  # variable recording the global training step
        # input
        x = tf.placeholder(tf.float32, [None, 32, 32, 3])
        y = tf.placeholder(tf.int32, [None, 10])
        # train
        logits, _ = ResNet_Model.resnet_v2_50(x, 10)
        train_op, loss_op = train(logits, y)
        # test
        test_op = test(logits, y)
        # build the local variable-initialization op init_op
        init_op = tf.global_variables_initializer()

        sv = tf.train.Supervisor(is_chief=is_chief,
                                 logdir=FLAGS.train_dir,
                                 init_op=init_op,
                                 recovery_wait_secs=1,
                                 global_step=global_step)
        if is_chief:
            print('Worker %d: Initializing session...' % FLAGS.task_index)
        else:
            print('Worker %d: Waiting for session to be initialized...' %
                  FLAGS.task_index)
        sess = sv.prepare_or_wait_for_session(server.target)
        print('Worker %d: Session initialization complete.' %
              FLAGS.task_index)

        local_step = 0
        start_time = time.time()
        while True:
            train_images, train_labels = train_reader.next_batch(
                FLAGS.batch_size)
            train_images = tf.cast(train_images, tf.float32)
            train_labels = tf.cast(tf.one_hot(train_labels, 10), tf.int32)
            train_images, train_labels = sess.run([train_images, train_labels])
            _, loss, step = sess.run([train_op, loss_op, global_step],
                                     feed_dict={
                                         x: train_images,
                                         y: train_labels
                                     })
            local_step += 1
            if local_step % 100 == 0:
                duration = time.time() - start_time
                print('Worker %d: training step %d done (global step: %d)' %
                      (FLAGS.task_index, local_step, step))
                if is_chief is True:
                    correct_count = 0.0
                    input_count = 0
                    while test_reader.epoch < 1:
                        test_images, test_labels = test_reader.next_batch(
                            FLAGS.batch_size)
                        test_images = tf.cast(test_images, tf.float32)
                        test_labels = tf.cast(tf.one_hot(test_labels, 10),
                                              tf.int32)
                        test_images, test_labels = sess.run(
                            [test_images, test_labels])
                        input_count += len(test_labels)
                        correct_count += sess.run(test_op,
                                                  feed_dict={
                                                      x: test_images,
                                                      y: test_labels
                                                  })
                    print('time: %.5f, loss: %.3f, acc: %.3f' %
                          (duration, loss, correct_count / input_count))
                    test_reader.clear()
                    start_time = time.time()
            if step >= FLAGS.train_steps:
                break
        sess.close()
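# Hedged sketches: train() and test() are called above but not shown in this snippet.
# These are plausible implementations consistent with how they are used; the optimizer
# choice and learning rate are assumptions.
def train(logits, labels, learning_rate=1e-3):
    # Softmax cross-entropy against the one-hot labels fed into `y`
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=tf.cast(labels, tf.float32)))
    # Pick up the 'global_step' variable created in main() so the worker loop's
    # stop condition (step >= FLAGS.train_steps) keeps advancing
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(
        loss_op, global_step=tf.train.get_global_step())
    return train_op, loss_op


def test(logits, labels):
    # Number of correctly classified samples in the fed batch
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    return tf.reduce_sum(tf.cast(correct, tf.float32))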
コード例 #59
0
input_data = 'GrayInputData.csv'
data_xy = np.loadtxt(input_data, delimiter=',', dtype=np.float32)
np.random.shuffle(data_xy)
data_N = len(data_xy) # print('data_N: ', data_N)

# ---------------------------------------------------------------------------------------------------
# X: input 32*32*1 (=1024)
# Y: output '1' or '0'
X = tf.placeholder(tf.float32, [None, 1024])
Y = tf.placeholder(tf.int32, [None, 1])  # 0,1

# output classes: 1 (fire), 0 (not fire)
nb_classes = 2

# one hot & reshape
Y_one_hot = tf.one_hot(Y, nb_classes) # print("one_hot", Y_one_hot)
Y_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes]) # print("reshape", Y_one_hot)

# img 32x32x1 (black/white)
X_img = tf.reshape(X, [-1, 32, 32, 1])

# ---------------------------------------------------------------------------------------------------
# L1 ImgIn shape = (?, 32, 32, 1)
W1 = tf.Variable(tf.random_normal([FS, FS, 1, FN], stddev=0.01))

# Conv -> (?, 32, 32, FN)
L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
L1 = tf.nn.relu(L1)

# ---------------------------------------------------------------------------------------------------
# L2 ImgIn shape = (?, 32, 32, FN)
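# Hedged continuation (assumption): the snippet is cut off here. A second conv layer
# mirroring L1, followed by 2x2 max pooling, would typically look like this; FS
# (filter size) and FN (filter count) are constants defined earlier in the script.
W2 = tf.Variable(tf.random_normal([FS, FS, FN, FN], stddev=0.01))

# Conv -> (?, 32, 32, FN), Pool -> (?, 16, 16, FN)
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')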
コード例 #60
0
def get_loss(mask_label, center_label, \
             heading_class_label, heading_residual_label, \
             size_class_label, size_residual_label, \
             end_points, \
             corner_loss_weight=10.0, \
             box_loss_weight=1.0):
    ''' Loss functions for 3D object detection.
    Input:
        mask_label: TF int32 tensor in shape (B,N)
        center_label: TF tensor in shape (B,3)
        heading_class_label: TF int32 tensor in shape (B,) 
        heading_residual_label: TF tensor in shape (B,) 
        size_class_label: TF tensor int32 in shape (B,)
        size_residual_label: TF tensor in shape (B,3)
        end_points: dict, outputs from our model
        corner_loss_weight: float scalar
        box_loss_weight: float scalar
    Output:
        total_loss: TF scalar tensor
            the total_loss is also added to the losses collection
    '''
    # 3D Segmentation loss
    mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\
        logits=end_points['mask_logits'], labels=mask_label))
    tf.summary.scalar('3d mask loss', mask_loss)

    # Center regression losses
    center_dist = tf.norm(center_label - end_points['center'], axis=-1)
    center_loss = huber_loss(center_dist, delta=2.0)
    tf.summary.scalar('center loss', center_loss)
    stage1_center_dist = tf.norm(center_label - \
        end_points['stage1_center'], axis=-1)
    stage1_center_loss = huber_loss(stage1_center_dist, delta=1.0)
    tf.summary.scalar('stage1 center loss', stage1_center_loss)

    # Heading loss
    heading_class_loss = tf.reduce_mean( \
        tf.nn.sparse_softmax_cross_entropy_with_logits( \
        logits=end_points['heading_scores'], labels=heading_class_label))
    tf.summary.scalar('heading class loss', heading_class_loss)

    hcls_onehot = tf.one_hot(heading_class_label,
                             depth=NUM_HEADING_BIN,
                             on_value=1,
                             off_value=0,
                             axis=-1)  # BxNUM_HEADING_BIN
    heading_residual_normalized_label = \
        heading_residual_label / (np.pi/NUM_HEADING_BIN)
    heading_residual_normalized_loss = huber_loss(tf.reduce_sum( \
        end_points['heading_residuals_normalized']*tf.to_float(hcls_onehot), axis=1) - \
        heading_residual_normalized_label, delta=1.0)
    tf.summary.scalar('heading residual normalized loss',
                      heading_residual_normalized_loss)

    # Size loss
    size_class_loss = tf.reduce_mean( \
        tf.nn.sparse_softmax_cross_entropy_with_logits( \
        logits=end_points['size_scores'], labels=size_class_label))
    tf.summary.scalar('size class loss', size_class_loss)

    scls_onehot = tf.one_hot(size_class_label,
                             depth=NUM_SIZE_CLUSTER,
                             on_value=1,
                             off_value=0,
                             axis=-1)  # BxNUM_SIZE_CLUSTER
    scls_onehot_tiled = tf.tile(tf.expand_dims( \
        tf.to_float(scls_onehot), -1), [1,1,3]) # BxNUM_SIZE_CLUSTERx3
    predicted_size_residual_normalized = tf.reduce_sum( \
        end_points['size_residuals_normalized']*scls_onehot_tiled, axis=[1]) # Bx3

    mean_size_arr_expand = tf.expand_dims( \
        tf.constant(g_mean_size_arr, dtype=tf.float32),0) # 1xNUM_SIZE_CLUSTERx3
    mean_size_label = tf.reduce_sum( \
        scls_onehot_tiled * mean_size_arr_expand, axis=[1]) # Bx3
    size_residual_label_normalized = size_residual_label / mean_size_label
    size_normalized_dist = tf.norm( \
        size_residual_label_normalized - predicted_size_residual_normalized,
        axis=-1)
    size_residual_normalized_loss = huber_loss(size_normalized_dist, delta=1.0)
    tf.summary.scalar('size residual normalized loss',
                      size_residual_normalized_loss)

    # Corner loss
    # We select the predicted corners corresponding to the
    # GT heading bin and size cluster.
    corners_3d = get_box3d_corners(
        end_points['center'], end_points['heading_residuals'],
        end_points['size_residuals'])  # (B,NH,NS,8,3)
    gt_mask = tf.tile(tf.expand_dims(hcls_onehot, 2), [1,1,NUM_SIZE_CLUSTER]) * \
        tf.tile(tf.expand_dims(scls_onehot,1), [1,NUM_HEADING_BIN,1]) # (B,NH,NS)
    corners_3d_pred = tf.reduce_sum( \
        tf.to_float(tf.expand_dims(tf.expand_dims(gt_mask,-1),-1)) * corners_3d,
        axis=[1,2]) # (B,8,3)

    heading_bin_centers = tf.constant( \
        np.arange(0,2*np.pi,2*np.pi/NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
    heading_label = tf.expand_dims(heading_residual_label,1) + \
        tf.expand_dims(heading_bin_centers, 0) # (B,NH)
    heading_label = tf.reduce_sum(tf.to_float(hcls_onehot) * heading_label, 1)
    mean_sizes = tf.expand_dims( \
        tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
    size_label = mean_sizes + \
        tf.expand_dims(size_residual_label, 1) # (1,NS,3) + (B,1,3) = (B,NS,3)
    size_label = tf.reduce_sum( \
        tf.expand_dims(tf.to_float(scls_onehot),-1)*size_label, axis=[1]) # (B,3)
    corners_3d_gt = get_box3d_corners_helper( \
        center_label, heading_label, size_label) # (B,8,3)
    corners_3d_gt_flip = get_box3d_corners_helper( \
        center_label, heading_label+np.pi, size_label) # (B,8,3)

    corners_dist = tf.minimum(
        tf.norm(corners_3d_pred - corners_3d_gt, axis=-1),
        tf.norm(corners_3d_pred - corners_3d_gt_flip, axis=-1))
    corners_loss = huber_loss(corners_dist, delta=1.0)
    tf.summary.scalar('corners loss', corners_loss)

    # Weighted sum of all losses
    total_loss = mask_loss + box_loss_weight * (center_loss + \
        heading_class_loss + size_class_loss + \
        heading_residual_normalized_loss*20 + \
        size_residual_normalized_loss*20 + \
        stage1_center_loss + \
        corner_loss_weight*corners_loss)
    tf.add_to_collection('losses', total_loss)

    return total_loss
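# Hedged sketch: huber_loss() is used throughout get_loss() but not defined in this
# snippet. A smooth-L1 / Huber helper consistent with how it is called (a scalar mean
# over the batch) would be:
def huber_loss(error, delta):
    abs_error = tf.abs(error)
    quadratic = tf.minimum(abs_error, delta)   # quadratic region: |error| <= delta
    linear = abs_error - quadratic             # linear region beyond delta
    losses = 0.5 * quadratic ** 2 + delta * linear
    return tf.reduce_mean(losses)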