# Beispiel #1
# 0
def rtt_fused_batch_norm(x,
                         scale,
                         offset,
                         mean,
                         variance,
                         epsilon=0.0001,
                         data_format="NHWC",
                         is_training=True,
                         name=None):
    """Fused batch normalization on an rtt tensor.

    Args:
      x: Input tensor (converted to an RttTensor).
      scale: Per-channel scale (gamma).
      offset: Per-channel offset (beta).
      mean: Population mean (used when not training).
      variance: Population variance (used when not training).
      epsilon: Small float added to variance to avoid division by zero.
      data_format: Either "NHWC" or "NCHW".
      is_training: Whether to compute batch statistics.
      name: Optional op name.

    Returns:
      A 5-tuple: (normalized output, batch mean, batch variance,
      reserve_space_1, reserve_space_2). The two reserve spaces are the
      op's intermediate buffers for the gradient pass and are returned
      unwrapped.
    """
    x = rtt_ts.convert_to_rtttensor(x)
    scale = rtt_ts.convert_to_rtttensor(scale)
    offset = rtt_ts.convert_to_rtttensor(offset)
    mean = rtt_ts.convert_to_rtttensor(mean)
    variance = rtt_ts.convert_to_rtttensor(variance)
    # BUGFIX: the original unpacked the two reserve spaces into `_, _`, which
    # rebinds `_` and returned the SAME (second) reserve space twice, losing
    # reserve_space_1. Bind them to distinct names and return both.
    y, batch_mean, batch_var, reserve_space_1, reserve_space_2 = \
        rtt_ts.rtt_ops.rtt_fused_batch_norm(
            x,
            scale,
            offset,
            mean,
            variance,
            epsilon=epsilon,
            data_format=data_format,
            is_training=is_training,
            name=name)
    return (rtt_ts.RttTensor(y), rtt_ts.RttTensor(batch_mean),
            rtt_ts.RttTensor(batch_var), reserve_space_1, reserve_space_2)
# Beispiel #2
# 0
    def _softmax(logits, compute_op, dim=-1, name=None):
        """Applies `compute_op` (a softmax-like op) along dimension `dim`.

        The underlying op only works on the last dimension, so when `dim`
        is not last, the target axis is transposed to the end, the op is
        applied, and the axes are transposed back.

        Args:
          logits: Tensor-like input; converted to an RttTensor.
          compute_op: Callable taking (logits, name=...) and operating on
            the last dimension.
          dim: Axis to reduce over; may be negative, a tf Tensor, or an
            RttTensor.
          name: Optional op name.

        Returns:
          An RttTensor with the same shape as `logits`.

        Raises:
          InvalidArgumentError: If a statically-known `dim` is out of range.
        """
        logits = rtt_ts.convert_to_rtttensor(logits)

        def _swap_axis(logits, dim_index, last_index, name=None):
            """Swaps logits's dim_index and last_index."""
            # Permutation: [0..dim_index), last, (dim_index..last), dim_index.
            return array_ops.transpose(
                logits,
                array_ops.concat([
                    math_ops.range(dim_index), [last_index],
                    math_ops.range(dim_index + 1, last_index), [dim_index]
                ], 0),
                name=name)

        # We need its original shape for shape inference.
        shape = logits._raw.get_shape()
        is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
        if is_last_dim:
            # Fast path: the op already works on the last dim; no transpose.
            _result = compute_op(logits, name=name)
            return rtt_ts.RttTensor(_result)

        # Resolve a static value for `dim` (if available) to validate range.
        dim_val = dim
        if isinstance(dim, ops.Tensor):
            dim_val = tensor_util.constant_value(dim)
        elif isinstance(dim, rtt_ts.RttTensor):
            dim_val = tensor_util.constant_value(dim._raw)
        if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
            raise errors_impl.InvalidArgumentError(
                None, None,
                "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
                " dimensions in the input." %
                (dim_val, -shape.ndims, shape.ndims, shape.ndims))

        # In case dim is negative (and is not last dimension -1), add shape.ndims
        ndims = array_ops.rank(logits._raw)
        if not isinstance(dim, ops.Tensor):
            if dim < 0:
                dim += ndims
        else:
            # Dynamic dim: normalize negatives inside the graph.
            dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)

        # Swap logits' dimension of dim and its last dimension.
        input_rank = array_ops.rank(logits._raw)
        dim_axis = dim % shape.ndims
        logits = _swap_axis(logits._raw, dim_axis,
                            math_ops.subtract(input_rank, 1))

        # Do the actual softmax on its last dimension.
        _result = compute_op(logits, name=name)

        # If dim is not the last dimension, we have to do a transpose so that we can
        # still perform softmax on its last dimension.
        _result = _swap_axis(_result,
                             dim_axis,
                             math_ops.subtract(input_rank, 1),
                             name=name)

        # Make shape inference work since transpose may erase its static shape.
        _result.set_shape(shape)
        return rtt_ts.RttTensor(_result)
# Beispiel #3
# 0
def rtt_bias_add(value, bias, data_format="NHWC", name=None):
    """Adds `bias` (a 1-D tensor) to `value` using the rtt bias-add kernel."""
    value_t = rtt_ts.convert_to_rtttensor(value)
    bias_t = rtt_ts.convert_to_rtttensor(bias)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_bias_add(value_t._raw,
                                    bias_t._raw,
                                    data_format=data_format,
                                    name=name))
# Beispiel #4
# 0
def rtt_arg_max(input, dimension=None, name=None, output_type=dtypes.string):
    """Index of the largest value along `dimension` (defaults to axis 0)."""
    axis = 0 if dimension is None else dimension
    converted = rtt_ts.convert_to_rtttensor(input)
    # NOTE(review): the converted RttTensor is passed directly (not ._raw),
    # matching the original — presumably rtt_arg_max accepts it as-is.
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_arg_max(converted,
                                   dimension=axis,
                                   name=name,
                                   output_type=output_type))
# Beispiel #5
# 0
def rtt_matmul(x, y, transpose_a=False, transpose_b=False, name=None):
    """Matrix product `x @ y`, optionally transposing either operand."""
    lhs = rtt_ts.convert_to_rtttensor(x)
    rhs = rtt_ts.convert_to_rtttensor(y)
    product = rtt_ts.rtt_ops.rtt_matmul(lhs._raw,
                                        rhs._raw,
                                        transpose_a=transpose_a,
                                        transpose_b=transpose_b,
                                        name=name)
    return rtt_ts.RttTensor(product)
# Beispiel #6
# 0
def rtt_max_pool(value,
                 ksize,
                 strides,
                 padding,
                 data_format="NHWC",
                 name=None):
    """Max pooling over `value` with the given window, strides and padding."""
    # The converted RttTensor is handed to the op directly (no ._raw unwrap),
    # as in the original wrapper.
    pooled = rtt_ts.rtt_ops.rtt_max_pool(rtt_ts.convert_to_rtttensor(value),
                                         ksize=ksize,
                                         strides=strides,
                                         padding=padding,
                                         data_format=data_format,
                                         name=name)
    return rtt_ts.RttTensor(pooled)
# Beispiel #7
# 0
def rtt_mean(
    input_tensor,
    axis=None,
    keepdims=None,
    name=None,
    reduction_indices=None,
    keep_dims=None,
):
    """Computes the mean of elements across dimensions of a tensor.

    Args:
      input_tensor: Tensor-like input; converted to an RttTensor.
      axis: Dimension(s) to reduce; None reduces all dimensions.
      keepdims: If True, retains reduced dimensions with length 1.
      name: Optional op name.
      reduction_indices: Deprecated alias for `axis`.
      keep_dims: Deprecated alias for `keepdims`.

    Returns:
      An RttTensor holding the reduced result.
    """
    # BUGFIX: `keep_dims` and `reduction_indices` were accepted but silently
    # ignored. Fold the deprecated aliases into their canonical parameters,
    # mirroring rtt_sum.
    keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                      "keep_dims", keep_dims)
    axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                  "reduction_indices",
                                                  reduction_indices)
    keepdims = False if keepdims is None else keepdims
    axis = math_ops._ReductionDims(input_tensor, axis)
    input_tensor = rtt_ts.convert_to_rtttensor(input_tensor)
    _result = rtt_ts.rtt_ops.rtt_reduce_mean(input_tensor,
                                             reduction_indices=axis,
                                             name=name,
                                             keep_dims=keepdims)
    return rtt_ts.RttTensor(_result)
# Beispiel #8
# 0
def rtt_sum(
    input_tensor,
    axis=None,
    keepdims=None,
    name=None,
    reduction_indices=None,
    keep_dims=None,
):
    """Computes the sum of elements across dimensions of a tensor.

    Args:
      input_tensor: Tensor-like input; converted to an RttTensor.
      axis: Dimension(s) to reduce; None reduces all dimensions.
      keepdims: If True, retains reduced dimensions with length 1.
      name: Optional op name.
      reduction_indices: Deprecated alias for `axis`.
      keep_dims: Deprecated alias for `keepdims`.

    Returns:
      An RttTensor holding the reduced result.
    """
    keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                      "keep_dims", keep_dims)
    # BUGFIX: `reduction_indices` was accepted but silently ignored; fold it
    # into `axis` the same way `keep_dims` folds into `keepdims`.
    axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                  "reduction_indices",
                                                  reduction_indices)
    keepdims = False if keepdims is None else keepdims
    axis = math_ops._ReductionDims(input_tensor, axis)
    input_tensor = rtt_ts.convert_to_rtttensor(input_tensor)
    _result = rtt_ts.rtt_ops.rtt_reduce_sum(input_tensor,
                                            reduction_indices=axis,
                                            name=name,
                                            keep_dims=keepdims)
    return rtt_ts.RttTensor(_result)
# Beispiel #9
# 0
def rtt_conv2d(input,
               filter,
               strides=None,
               padding=None,
               use_cudnn_on_gpu=False,
               explicit_paddings=None,
               data_format="NHWC",
               dilations=None,
               name=None):
    """2-D convolution of `input` with `filter` via the rtt conv2d kernel.

    Args:
      input: Input tensor; converted to an RttTensor.
      filter: Convolution kernel; converted to an RttTensor.
      strides: Stride list for each input dimension.
      padding: Padding algorithm ("SAME"/"VALID" style).
      use_cudnn_on_gpu: Forwarded to the underlying op.
      explicit_paddings: Explicit padding values; defaults to [].
      data_format: Either "NHWC" or "NCHW".
      dilations: Dilation factors per dimension; defaults to [1, 1, 1, 1]
        (no dilation).
      name: Optional op name.

    Returns:
      An RttTensor holding the convolution result.
    """
    # Avoid mutable default arguments; normalize to the previous defaults.
    if explicit_paddings is None:
        explicit_paddings = []
    if dilations is None:
        dilations = [1, 1, 1, 1]
    input = rtt_ts.convert_to_rtttensor(input)
    filter = rtt_ts.convert_to_rtttensor(filter)
    # BUGFIX: the `dilations` argument was accepted but discarded (the op was
    # always called with dilations=None). Forward it; the default [1,1,1,1]
    # is identity dilation. NOTE(review): confirm the rtt kernel treats
    # [1,1,1,1] the same as None.
    _result = rtt_ts.rtt_ops.rtt_conv2d(input._raw,
                                        filter._raw,
                                        strides=strides,
                                        padding=padding,
                                        use_cudnn_on_gpu=use_cudnn_on_gpu,
                                        explicit_paddings=explicit_paddings,
                                        data_format=data_format,
                                        dilations=dilations,
                                        name=name)
    return rtt_ts.RttTensor(_result)
# Beispiel #10
# 0
def rtt_truediv(x, y, name=None):
    """Element-wise true division `x / y` (Python 3 semantics)."""
    numerator = rtt_ts.convert_to_rtttensor(x)
    denominator = rtt_ts.convert_to_rtttensor(y)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_truediv(numerator._raw, denominator._raw,
                                   name=name))
# Beispiel #11
# 0
def rtt_floordiv(x, y, name=None):
    """Element-wise `x // y`, rounding toward the most negative integer."""
    numerator = rtt_ts.convert_to_rtttensor(x)
    denominator = rtt_ts.convert_to_rtttensor(y)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_floordiv(numerator._raw, denominator._raw,
                                    name=name))
# Beispiel #12
# 0
def rtt_mul(x, y, name=None):
    """Element-wise product `x * y`."""
    lhs = rtt_ts.convert_to_rtttensor(x)
    rhs = rtt_ts.convert_to_rtttensor(y)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_mul(lhs._raw, rhs._raw, name=name))
# Beispiel #13
# 0
def rtt_sigmoid(x, name=None):
    """Element-wise logistic sigmoid: `1 / (1 + exp(-x))`."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_sigmoid(tensor._raw, name=name))
# Beispiel #14
# 0
def rtt_neg(x, name=None):
    """Element-wise numerical negation of `x`."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_negative(tensor._raw, name=name))
# Beispiel #15
# 0
def rtt_abs(x, name=None):
    """Element-wise absolute value of `x`."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_abs(tensor._raw, name=name))
# Beispiel #16
# 0
def rtt_log1p(x, name=None):
    """Element-wise natural logarithm of `(1 + x)`."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_log1p(tensor._raw, name=name))
# Beispiel #17
# 0
def rtt_pow(x, y, name=None):
    """Element-wise power `x ** y`."""
    base = rtt_ts.convert_to_rtttensor(x)
    exponent = rtt_ts.convert_to_rtttensor(y)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_pow(base._raw, exponent._raw, name=name))
# Beispiel #18
# 0
def rtt_relu(x, name=None):
    """Element-wise rectified linear unit: `max(x, 0)`."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_relu(tensor._raw, name=name))
# Beispiel #19
# 0
def rtt_realdiv(x, y, name=None):
    """Element-wise real-valued division `x / y`."""
    numerator = rtt_ts.convert_to_rtttensor(x)
    denominator = rtt_ts.convert_to_rtttensor(y)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_realdiv(numerator._raw, denominator._raw,
                                   name=name))
# Beispiel #20
# 0
def rtt_logical_not(x, name=None):
    """Element-wise logical negation `!x`."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_logical_not(tensor._raw, name=name))
# Beispiel #21
# 0
def rtt_logical_and(x, y, name=None):
    """Element-wise logical conjunction `x & y`."""
    lhs = rtt_ts.convert_to_rtttensor(x)
    rhs = rtt_ts.convert_to_rtttensor(y)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_logical_and(lhs._raw, rhs._raw, name=name))
# Beispiel #22
# 0
def rtt_l2_loss(x, name=None):
    """L2 loss of `x` via the rtt l2-loss kernel."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_l2_loss(tensor._raw, name=name))
# Beispiel #23
# 0
def rtt_notequal(x, y, name=None):
    """Element-wise truth value of `x != y`."""
    lhs = rtt_ts.convert_to_rtttensor(x)
    rhs = rtt_ts.convert_to_rtttensor(y)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_not_equal(lhs._raw, rhs._raw, name=name))
# Beispiel #24
# 0
def rtt_square(x, name=None):
    """Element-wise square `x * x`."""
    tensor = rtt_ts.convert_to_rtttensor(x)
    return rtt_ts.RttTensor(
        rtt_ts.rtt_ops.rtt_square(tensor._raw, name=name))