Code example #1
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):

    if reduction_indices is not None:
        if axis is not None:
            raise ValueError(
                "cannot specify both 'axis' and 'reduction_indices'.")
        axis = reduction_indices
    elif axis is None:
        axis = -1  # -1 is this framework's sentinel for reducing all axes
    if isinstance(axis, (list, tuple)):  # reduce the given axes sequentially
        if len(axis) < 1:
            raise RuntimeError('at least one reduction axis is required.')
        if len(axis) == 1:
            return ops.Sum(input_tensor, axis=axis[0], keep_dims=keep_dims)
        else:
            # Keep dims on the intermediate reductions so that the
            # remaining axis indices stay valid.
            ret = ops.Sum(input_tensor, axis=axis[0], keep_dims=True)
            for i in range(1, len(axis) - 1):
                ret = ops.Sum(ret, axis=axis[i], keep_dims=True)
            return ops.Sum(ret, axis=axis[-1], keep_dims=keep_dims)
    else:
        return ops.Sum(input_tensor, axis=axis, keep_dims=keep_dims)
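
A quick numpy check of the multi-axis branch above (a sketch, independent of Dragon): reducing one axis at a time with `keep_dims=True` on the intermediate steps keeps the remaining axis indices valid, and the summed values match a single reduction over all the requested axes.

import numpy as np

x = np.ones((2, 3, 4))

# Mirror the multi-axis branch: reduce one axis at a time, keeping
# dims on the intermediate step so the later axis index stays valid.
ret = x.sum(axis=0, keepdims=True)   # shape (1, 3, 4)
ret = ret.sum(axis=1)                # shape (1, 4)

# The values match a single reduction over both axes (up to the
# size-1 dim left over from the intermediate step).
assert np.array_equal(ret.squeeze(), x.sum(axis=(0, 1)))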
Code example #2
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
    """
    Computes the sum of elements across dimensions of a tensor.

      Reduces `input_tensor` along the dimensions given in `axis`.
      Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
      entry in `axis`. If `keep_dims` is true, the reduced dimensions
      are retained with length 1.

      If `axis` has no entries, all dimensions are reduced, and a
      tensor with a single element is returned.

      For example:

      ```python
      # 'x' is [[1, 1, 1]
      #         [1, 1, 1]]
      tf.reduce_sum(x) ==> 6
      tf.reduce_sum(x, 0) ==> [2, 2, 2]
      tf.reduce_sum(x, 1) ==> [3, 3]
      tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
      tf.reduce_sum(x, [0, 1]) ==> 6
      ```

      Args:
        input_tensor: The tensor to reduce. Should have numeric type.
        axis: The dimensions to reduce. If `None` (the default),
          reduces all dimensions.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: A name for the operation (optional).
        reduction_indices: The old (deprecated) name for axis.

      Returns:
        The reduced tensor.
    """

    if reduction_indices is not None:
        if axis is not None:
            raise ValueError(
                "cannot specify both 'axis' and 'reduction_indices'.")
        axis = reduction_indices
    elif axis is None:
        axis = -1  # -1 is this framework's sentinel for reducing all axes
    if isinstance(axis, (list, tuple)):  # reduce the given axes sequentially
        if len(axis) < 1:
            raise RuntimeError('at least one reduction axis is required.')
        if len(axis) == 1:
            return ops.Sum(input_tensor, axis=axis[0], keep_dims=keep_dims)
        else:
            # Keep dims on the intermediate reductions so that the
            # remaining axis indices stay valid.
            ret = ops.Sum(input_tensor, axis=axis[0], keep_dims=True)
            for i in range(1, len(axis) - 1):
                ret = ops.Sum(ret, axis=axis[i], keep_dims=True)
            return ops.Sum(ret, axis=axis[-1], keep_dims=keep_dims)
    else:
        return ops.Sum(input_tensor, axis=axis, keep_dims=keep_dims)
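
The values in the docstring can be verified with numpy, whose `sum` follows the same reduction semantics (a sketch, independent of Dragon):

import numpy as np

x = np.array([[1, 1, 1],
              [1, 1, 1]])

assert x.sum() == 6                                  # reduce_sum(x)
assert np.array_equal(x.sum(axis=0), [2, 2, 2])      # reduce_sum(x, 0)
assert np.array_equal(x.sum(axis=1), [3, 3])         # reduce_sum(x, 1)
assert np.array_equal(x.sum(axis=1, keepdims=True),
                      [[3], [3]])                    # keep_dims=True
assert x.sum(axis=(0, 1)) == 6                       # reduce_sum(x, [0, 1])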
Code example #3
    def _create_graph(self):
        self.x = Tensor(shape=[None, self.img_channels, self.img_height, self.img_width]).Variable()
        self.y_r = Tensor(shape=[None], name='Yr').Variable()

        # As implemented in the A3C paper
        self.n1 = ops.Relu(ops.Conv2D([self.x] + self.weight_bias(), kernel_size=8, stride=4, num_output=16))
        self.n2 = ops.Relu(ops.Conv2D([self.n1] + self.weight_bias(), kernel_size=4, stride=2, num_output=32))

        self.action_index = Tensor(shape=[None, self.num_actions]).Variable()

        self.d1 = ops.Relu(ops.InnerProduct([self.n2] + self.weight_bias(), num_output=256))

        self.logits_v = ops.InnerProduct([self.d1] + self.weight_bias(), num_output=1)
        self.cost_v = ops.L2Loss([self.y_r, self.logits_v])

        self.logits_p = ops.InnerProduct([self.d1] + self.weight_bias(), num_output=self.num_actions)

        if Config.USE_LOG_SOFTMAX:
            raise NotImplementedError()
        else:
            self.softmax_p = ops.Softmax(self.logits_p)
            self.selected_action_prob = ops.Sum(self.softmax_p * self.action_index, axis=1)
            # Policy-gradient term: log pi(a|s) weighted by the advantage.
            self.cost_p_1 = ops.Log(ops.Clip(self.selected_action_prob, self.log_epsilon, None)) * \
                            (self.y_r - ops.StopGradient(self.logits_v))
            # Entropy bonus, scaled by beta, to encourage exploration.
            self.cost_p_2 = ops.Sum(ops.Log(ops.Clip(self.softmax_p, self.log_epsilon, None)) *
                                    self.softmax_p, axis=1) * (-self.beta)
        self.cost_p_1_agg = ops.Sum(self.cost_p_1)
        self.cost_p_2_agg = ops.Sum(self.cost_p_2)
        self.cost_p = -(self.cost_p_1_agg + self.cost_p_2_agg)
        self.cost_all = self.cost_p + self.cost_v
        
        if Config.DUAL_RMSPROP:
            raise NotImplementedError()
        else:
            if Config.USE_GRAD_CLIP:
                self.opt = updaters.RMSPropUpdater(decay=Config.RMSPROP_DECAY,
                                                   eps=Config.RMSPROP_EPSILON,
                                                   clip_gradient=Config.GRAD_CLIP_NORM)
            else:
                self.opt = updaters.RMSPropUpdater(decay=Config.RMSPROP_DECAY,
                                                   eps=Config.RMSPROP_EPSILON)

        # Differentiate the total cost w.r.t. the network parameters and
        # register each (param, grad) pair with the updater.
        grads = T.grad(self.cost_all, self.network_params)
        for p, g in zip(self.network_params, grads):
            self.opt.append((p, g), lr_mult=1.0)
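
The two policy-loss terms built above are the standard A3C objective: `cost_p_1` is the log-probability of the chosen action weighted by the advantage, and `cost_p_2` is an entropy bonus scaled by `beta`. A minimal numpy sketch of the same arithmetic, using hypothetical values (not Dragon code):

import numpy as np

beta, log_epsilon = 0.01, 1e-6             # hypothetical hyperparameters
softmax_p = np.array([0.7, 0.2, 0.1])      # hypothetical action probabilities
action_index = np.array([1.0, 0.0, 0.0])   # one-hot selected action
advantage = 0.5                            # y_r - V(s), gradient-stopped above

selected_prob = np.sum(softmax_p * action_index)
cost_p_1 = np.log(np.clip(selected_prob, log_epsilon, None)) * advantage
cost_p_2 = np.sum(np.log(np.clip(softmax_p, log_epsilon, None))
                  * softmax_p) * -beta
cost_p = -(cost_p_1 + cost_p_2)            # the loss handed to RMSProp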
Code example #4
File: math_ops.py Project: yyaqi/Dragon
def reduce_sum(
    input_tensor,
    axis=None,
    keep_dims=False,
    name=None,
    reduction_indices=None,
):
    if reduction_indices is not None:
        if axis is not None:
            raise ValueError(
                "Cannot specify both 'axis' and 'reduction_indices'.")
        axis = reduction_indices
    return _ops.Sum(
        input_tensor,
        axes=axis,
        keep_dims=keep_dims,
        name=name,
    )
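
This later revision drops the sequential-reduction loop of examples #1 and #2 and forwards `axis` (including `None` and lists) straight to `_ops.Sum` through its plural `axes` argument. Hypothetical usage, assuming a Dragon tensor `x` is already defined:

y1 = reduce_sum(x, axis=[0, 1])           # multi-axis handled by the op itself
y2 = reduce_sum(x, reduction_indices=1)   # deprecated alias for axis=1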
Code example #5
File: nnet.py Project: awesome-archive/Dragon
def categorical_crossentropy(coding_dist, true_dist, axis=1):
    """Compute the categorical cross-entropy between input and target distribution.

    Parameters
    ----------
    coding_dist : Tensor
        The predicted (input) distribution.
    true_dist : Tensor
        The target distribution.
    axis : int
        The axis along which the categories lie.

    Returns
    -------
    Tensor
        The categorical cross-entropy.

    """
    return -ops.Sum(true_dist * ops.Log(coding_dist), axis=axis)
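
A small numpy check of the formula (a sketch, not Dragon code): with one-hot targets, `-Sum(true_dist * Log(coding_dist), axis)` reduces to the negative log-probability of the true class.

import numpy as np

coding_dist = np.array([[0.7, 0.2, 0.1],
                        [0.1, 0.8, 0.1]])   # hypothetical predictions
true_dist = np.array([[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])     # one-hot targets

xent = -np.sum(true_dist * np.log(coding_dist), axis=1)
assert np.allclose(xent, -np.log([0.7, 0.8]))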
Code example #6
File: basic.py Project: k9sret/Dragon
def sum(input, axis=None, keepdims=False, **kwargs):
    """Compute the sum along the given axis.

    Parameters
    ----------
    input : Tensor
        The input tensor.
    axis : int
        The axis to reduce. Default is ``None`` (reduce along all axes).
    keepdims : boolean
        Whether to keep the reduced dims after computing.

    Returns
    -------
    Tensor
        The sum result.

    """
    if axis is None:
        axis = -1  # -1 is this framework's sentinel for reducing all axes
    return ops.Sum(input, axis=axis, keep_dims=keepdims)
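
Note the design choice shared with examples #1 and #2: the default ``None`` is mapped to ``-1``, which this framework treats as "reduce over all axes" (unlike numpy, where ``axis=-1`` means the last axis only). Hypothetical usage, assuming a Dragon tensor `x` is already defined:

total = sum(x)                        # axis=None -> -1: sum over all axes
rows = sum(x, axis=1, keepdims=True)  # keep the reduced dim with length 1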