Example #1
import tensorflow as tf
from tensorflow.python.ops import standard_ops  # TF1-internal module used below

def one_hot_encoding(target, n_classes, on_value=1.0, off_value=0.0,
                     name="OneHotEncoding"):
    """ One Hot Encoding.

    Transform numeric labels into a binary vector.

    Input:
        The Labels Placeholder.

    Output:
        2-D Tensor, The encoded labels.

    Arguments:
        target: `Placeholder`. The labels placeholder.
        n_classes: `int`. Total number of classes.
        on_value: `scalar`. A scalar defining the on-value.
        off_value: `scalar`. A scalar defining the off-value.
        name: A name for this layer (optional). Default: 'OneHotEncoding'.

    """

    with tf.name_scope(name):
        if target.dtype != tf.int64:
            target = standard_ops.to_int64(target)

        target = standard_ops.one_hot(target, n_classes,
                                      on_value=on_value,
                                      off_value=off_value)

    # Track the output tensor. Note: tf.GraphKeys.LAYER_TENSOR is a custom
    # collection key defined by the surrounding library, not stock TensorFlow.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, target)

    return target
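
A quick usage sketch (assuming a TF1 session; tf.GraphKeys.LAYER_TENSOR must be defined by the library, so it is stubbed here):

import numpy as np
import tensorflow as tf

tf.GraphKeys.LAYER_TENSOR = 'layer_tensor'  # stub; normally defined by the library

labels = tf.placeholder(tf.int32, shape=[None])
encoded = one_hot_encoding(labels, n_classes=3)

with tf.Session() as sess:
    print(sess.run(encoded, feed_dict={labels: np.array([0, 2, 1])}))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]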
Example #2

# From an early tf.contrib.layers; the internal imports below are assumptions:
from tensorflow.python.framework import dtypes, ops
from tensorflow.python.ops import standard_ops
from tensorflow.contrib.layers.python.layers import utils


def one_hot_encoding(labels,
                     num_classes,
                     on_value=1.0,
                     off_value=0.0,
                     outputs_collections=None,
                     scope=None):
  """Transform numeric labels into onehot_labels using tf.one_hot.
  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    on_value: A scalar defining the on-value.
    off_value: A scalar defining the off-value.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for op_scope.
  Returns:
    one hot encoding of the labels.
  """
  with ops.op_scope([labels, num_classes], scope, 'OneHotEncoding') as sc:
    if labels.dtype == dtypes.int32:
      labels = standard_ops.to_int64(labels)
    outputs = standard_ops.one_hot(labels,
                                   num_classes,
                                   on_value=on_value,
                                   off_value=off_value)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
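
Note that ops.op_scope is the pre-1.0 scoping API; the TF 1.x equivalent (an assumption about the intended upgrade, note the changed argument order) would be:

# Pre-1.0: ops.op_scope(values, name, default_name)
# TF 1.x:  ops.name_scope(name, default_name, values)
with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
    outputs = standard_ops.one_hot(labels, num_classes,
                                   on_value=on_value, off_value=off_value)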
Example #3
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import standard_ops

def one_hot_encoding(target, n_classes, on_value=1.0, off_value=0.0,
                     name="OneHotEncoding"):
    """ One Hot Encoding.

    Transform numeric labels into a binary vector.

    Input:
        The Labels Placeholder.

    Output:
        2-D Tensor, The encoded labels.

    Arguments:
        target: `Placeholder`. The labels placeholder.
        n_classes: `int`. Total number of classes.
        on_value: `scalar`. A scalar defining the on-value.
        off_value: `scalar`. A scalar defining the off-value.
        name: A name for this layer (optional). Default: 'OneHotEncoding'.

    """

    with tf.name_scope(name):
        if target.dtype != dtypes.int64:
            target = standard_ops.to_int64(target)

        target = standard_ops.one_hot(target, n_classes,
                                      on_value=on_value,
                                      off_value=off_value)

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, target)

    return target
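
The on_value/off_value arguments make this useful beyond hard 0/1 targets, e.g. for a simple label-smoothing scheme (the values below are illustrative, using the public tf.one_hot):

import tensorflow as tf

n_classes = 4
smooth = 0.1
labels = tf.constant([0, 3])
smoothed = tf.one_hot(labels, depth=n_classes,
                      on_value=1.0 - smooth,
                      off_value=smooth / (n_classes - 1))
# Row 0 is approximately [0.9, 0.033, 0.033, 0.033].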
Example #4
    # Method of a Classifier estimator class; assumes `Mapping` is imported
    # from collections.abc (collections on Python 2) at module level.
    def _preprocess(self, features, labels):
        if isinstance(labels, Mapping):
            labels = labels['label']

        if self.one_hot_encode:
            labels = standard_ops.one_hot(indices=labels, depth=self.n_classes)
        return super(Classifier, self)._preprocess(features, labels)
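
A standalone sketch of the same unwrap-then-encode behavior (names are illustrative):

import tensorflow as tf
from collections.abc import Mapping

labels = {'label': tf.constant([1, 0, 2])}
if isinstance(labels, Mapping):
    labels = labels['label']
onehot = tf.one_hot(labels, depth=3)  # shape [3, 3]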
Example #5
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import standard_ops


def max_spanning_tree_gradient(mst_op, d_loss_d_max_scores, *_):
  """Returns a subgradient of the MaximumSpanningTree op.

  Note that MaximumSpanningTree is only differentiable w.r.t. its |scores| input
  and its |max_scores| output.

  Args:
    mst_op: The MaximumSpanningTree op being differentiated.
    d_loss_d_max_scores: [B] vector where entry b is the gradient of the network
      loss w.r.t. entry b of the |max_scores| output of the |mst_op|.
    *_: The gradients w.r.t. the other outputs; ignored.

  Returns:
    1. None, since the op is not differentiable w.r.t. its |num_nodes| input.
    2. [B,M,M] tensor where entry b,t,s is a subgradient of the network loss
       w.r.t. entry b,t,s of the |scores| input, with the same dtype as
       |d_loss_d_max_scores|.
  """
  dtype = d_loss_d_max_scores.dtype.base_dtype
  if dtype is None:
    # errors.InvalidArgumentError takes (node_def, op, message).
    raise errors.InvalidArgumentError(
        None, None, "expected a concrete dtype, got %s" % dtype)

  argmax_sources_bxm = mst_op.outputs[1]
  input_dim = array_ops.shape(argmax_sources_bxm)[1]  # M in the docstring

  # The one-hot argmax is a subgradient of max.  Convert the batch of maximal
  # spanning trees into 0/1 indicators, then scale them by the relevant output
  # gradients from |d_loss_d_max_scores|.  Note that |d_loss_d_max_scores| must
  # be reshaped in order for it to broadcast across the batch dimension.
  indicators_bxmxm = standard_ops.one_hot(
      argmax_sources_bxm, input_dim, dtype=dtype)
  d_loss_d_max_scores_bx1 = array_ops.expand_dims(d_loss_d_max_scores, -1)
  d_loss_d_max_scores_bx1x1 = array_ops.expand_dims(d_loss_d_max_scores_bx1, -1)
  d_loss_d_scores_bxmxm = indicators_bxmxm * d_loss_d_max_scores_bx1x1
  return None, d_loss_d_scores_bxmxm
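
For this subgradient to be used during backprop it must be registered for the op; a minimal sketch, assuming the custom op is registered under the name "MaximumSpanningTree":

from tensorflow.python.framework import ops

ops.RegisterGradient("MaximumSpanningTree")(max_spanning_tree_gradient)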
Example #6
    # Method of a DDQNModel class; assumes `tf` and `standard_ops` are
    # imported at module level.
    def _build_loss(self, results, features, labels):
        """Creates the loss operation.

        Returns:
             tuple `(losses, loss)`:
                `losses` are the per-batch losses.
                `loss` is a single scalar tensor to minimize.
        """
        reward, action, done = labels['reward'], labels['action'], labels['done']
        train_action_selector = standard_ops.one_hot(
            indices=self._index_action, depth=self.num_actions)
        target_q_values = tf.reduce_sum(
            tf.multiply(self._target_results.q, train_action_selector), axis=1)

        action_selector = standard_ops.one_hot(indices=action[:-1], depth=self.num_actions)
        train_q_value = tf.reduce_sum(
            tf.multiply(self._train_results.q[:-1], action_selector), axis=1)

        target_q_value = (reward[:-1] + (1.0 - tf.cast(done[:-1], tf.float32)) *
                          self.discount * target_q_values[1:])
        return super(DDQNModel, self)._build_loss(train_q_value, features, target_q_value)
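
The one-hot mask is how a single Q-value is selected per row; a small standalone illustration using the public tf.one_hot:

import tensorflow as tf

q = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
actions = tf.constant([2, 0])
mask = tf.one_hot(actions, depth=3)           # [[0., 0., 1.], [1., 0., 0.]]
q_selected = tf.reduce_sum(q * mask, axis=1)  # [3.0, 4.0]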
Example #7
    # Method of a layer class; assumes module-level imports of `tf`, `dtypes`,
    # `standard_ops`, and the library's `track` helper.
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: The labels placeholder.

        Returns:
            2-D Tensor, the encoded labels.
        """
        if incoming.dtype != dtypes.int64:
            incoming = standard_ops.to_int64(incoming)

        incoming = standard_ops.one_hot(indices=incoming, depth=self.n_classes,
                                        on_value=self.on_value, off_value=self.off_value)
        track(incoming, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return incoming
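
Hypothetical usage, assuming the enclosing layer class is named OneHotEncoding and follows the usual __call__ -> _build pattern:

layer = OneHotEncoding(n_classes=10, on_value=1.0, off_value=0.0)
encoded = layer(labels_tensor)  # labels_tensor: int tensor of shape [batch]
                                # result: 2-D one-hot tensor of shape [batch, 10]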