Example #1
def gather(input, dim, index, out=None):
    """Gather the input values along the given axis.

    Note that this is a TensorFlow-style gather: it takes a vector of
    indices, and values along the other dimensions are copied automatically.

    Parameters
    ----------
    input : vm.torch.Tensor
        The values.
    dim : int
        The dim to gather.
    index : vm.torch.Tensor
        The indices.
    out : vm.torch.Tensor or None
        The optional output tensor.

    Returns
    -------
    vm.torch.Tensor
        The output tensor.

    """
    ctx = MakeContext(inputs=[input, index], outputs=[out] if out is not None else [])
    key = 'torch/ops/gather/{}:{}/dim:{}'.format(ctx[0].lower(), ctx[1], dim)
    module = get_module(Gather, key, ctx, axis=dim)
    return module.forward(input, index, out)
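
A minimal usage sketch of gather, reusing the randn, ones and _type_to helpers defined elsewhere on this page (the index values and dtype handling are illustrative):

x = randn(4, 3)
index = _type_to(ones(2), 'int64')  # illustrative integer index vector: [1, 1]
y = gather(x, 0, index)             # picks row 1 twice; other dims copied automatically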
Example #2
def _rfundamental(input, value, op='RAdd', out=None):
    if not isinstance(value, Tensor):
        value = WrapScalar(value, input._dtype, input._ctx)
    ctx = MakeContext(inputs=[input, value])
    key = 'torch/ops/{}/{}:{}'.format(op.lower(), ctx[0].lower(), ctx[1])
    module = get_module(Fundamental, key, ctx, op_type=op)
    # Reflected op: the (wrapped) scalar operand is passed first.
    return module.forward(value, input, out)
Example #3
def _permute(input, perms=None):
    ctx = MakeContext(inputs=[input])
    len_perms = len(perms) if perms else 0
    key = 'torch/ops/permute/{}:{}/n_dims:#{}'.format(ctx[0].lower(), ctx[1],
                                                      len_perms)
    module = get_module(Permute, key, ctx, len_perms=len_perms)
    return module.forward(input, perms)
Example #4
def _repeat(input, times):
    ctx = MakeContext(inputs=[input])
    len_times = len(times)
    key = 'torch/ops/repeat/{}:{}/n_times:#{}'.format(ctx[0].lower(), ctx[1],
                                                      len_times)
    module = get_module(Repeat, key, ctx, len_times=len_times)
    return module.forward(input, times)
Example #5
def ones(*sizes, **kwargs):
    """Return a float tensor with values of ``1``.

    Parameters
    ----------
    sizes : tuple, list or int
        The sizes indicating the shape of the output tensor.
    out : vm.torch.Tensor
        The optional output tensor.

    Returns
    -------
    vm.torch.FloatTensor
        The output tensor.

    """
    arguments = {'value': 1.0, 'dims': sizes}
    out = kwargs.get('out')
    if out is None:
        out = LeafTensor(sizes, requires_grad=kwargs.get('requires_grad', False))
    inputs = []
    outputs = [out]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'Fill', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)
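
A minimal usage sketch of the factory above (shapes are arbitrary):

a = ones(2, 3)                      # 2x3 float tensor filled with 1.0
b = ones(2, 3, requires_grad=True)  # leaf tensor that tracks gradients
ones(2, 3, out=a)                   # reuse an existing tensor as the output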
Example #6
def zeros_like(input, out=None, **kwargs):
    """Return a float tensor with values of ``0``, shape as the input.

    Parameters
    ----------
    input : vm.torch.Tensor
        The tensor for indicating shape.
    out : vm.torch.Tensor
        The optional output tensor.

    Returns
    -------
    vm.torch.FloatTensor
        The output tensor.

    """
    if not hasattr(input, 'shape'):
        raise ValueError('Input does not have the shape attribute.')
    arguments = {'value': 0.0, 'dims': input.shape}
    if out is None:
        out = LeafTensor(input.shape,
                         requires_grad=kwargs.get('requires_grad', False))
    inputs = []
    outputs = [out]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'Fill', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)
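
A minimal usage sketch, reusing the randn factory defined elsewhere on this page:

x = randn(2, 3)
z = zeros_like(x)  # new 2x3 float tensor of zeros, shaped like x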
Example #7
def randn(*sizes, **kwargs):
    """Return a float tensor with a normal distribution of N(0, 1).

    Parameters
    ----------
    sizes : tuple, list or int
        The sizes indicating the shape of the output tensor.
    out : vm.torch.Tensor
        The optional output tensor.

    Returns
    -------
    vm.torch.FloatTensor
        The output tensor.

    """
    arguments = {'mean': 0.0, 'std': 1.0, 'dims': sizes}
    out = kwargs.get('out')
    if out is None:
        out = LeafTensor(sizes, requires_grad=kwargs.get('requires_grad', False))
    inputs = []
    outputs = [out]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'RandomNormal', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)
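
A minimal usage sketch (shapes are arbitrary):

x = randn(4, 3)                      # 4x3 tensor with values drawn from N(0, 1)
y = randn(4, 3, requires_grad=True)  # leaf tensor that tracks gradients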
Example #8
def _fill(input, shape, value):
    ctx = MakeContext(inputs=[input])
    len_shape = len(shape)
    key = 'torch/ops/fill/{}:{}/ndims:#{}/value:{}'.format(
        ctx[0].lower(), ctx[1], len_shape, value)
    module = get_module(Fill, key, ctx, len_shape=len_shape, value=value)
    return module.forward(input, shape)
Example #9
def reshape(input, shape, shape_like=None):
    if shape_like is not None:
        shape = shape_like.shape
    ctx = MakeContext(inputs=[input])
    len_shape = len(shape)
    key = 'torch/ops/reshape/{}:{}/n_dims:#{}'.format(ctx[0].lower(), ctx[1],
                                                      len_shape)
    module = get_module(Reshape, key, ctx, len_shape=len_shape)
    return module.forward(input, shape)
Example #10
def _update(param, grad, op_type, slot,
            lr_mult=1.0, decay_mult=1.0):
    ctx = MakeContext(inputs=[param])
    key = 'torch/ops/{}/{}:{}/{}/{}'.format(
        op_type.lower(), ctx[0].lower(), ctx[1], slot, param.name)
    module = get_module(Update, key, ctx, op_type=op_type,
                        lr_mult=lr_mult, decay_mult=decay_mult, slot=slot)
    return module.forward(param, grad)
Example #11
def _type_to(input, dtype='float32', inplace=False):
    if dtype == input._dtype:
        return input
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/astype/{}:{}/dtype:{}/inplace:{}'.format(
        ctx[0].lower(), ctx[1], dtype, 'true' if inplace else 'false')
    module = get_module(AsType, key, ctx, dtype=dtype, inplace=inplace)
    with no_grad():
        return module.forward(input)
Example #12
def _allreduce(grads):
    if not mpi.Is_Init():
        return
    if not isinstance(grads, (list, tuple)):
        grads = [grads]
    ctx = MakeContext(inputs=grads)
    mode = mpi.GetParallelMode() + '_ALLREDUCE'
    key = 'torch/ops/collective/{}:{}/{}'.format(
        ctx[0].lower(), ctx[1], mode.lower())
    module = get_module(Collective, key, ctx, mode=mode)
    return module.forward(grads)
Example #13
def _fundamental(input, value, op='Add', out=None):
    if not isinstance(value, Tensor):
        if not isinstance(value, (int, float)):
            raise TypeError(
                'Type of value should be numerical, got {}.'.format(
                    type(value)))
        value = WrapScalar(value, input._dtype, input._ctx)
    ctx = MakeContext(inputs=[input, value])
    key = 'torch/ops/{}/{}:{}'.format(op.lower(), ctx[0].lower(), ctx[1])
    module = get_module(Fundamental, key, ctx, op_type=op)
    return module.forward(input, value, out)
Example #14
def roi_pool(feature, rois, pooled_h, pooled_w, spatial_scale):
    ctx = MakeContext(inputs=[feature])
    key = 'torch/ops/roi_pool/{}:{}/pool_h:{}/pool_w:{}/spatial_scale:{}'.format(
        ctx[0].lower(), ctx[1], pooled_h, pooled_w, spatial_scale)
    module = get_module(RoIPool,
                        key,
                        ctx,
                        pooled_h=pooled_h,
                        pooled_w=pooled_w,
                        spatial_scale=spatial_scale)
    return module.forward(feature, rois)
Example #15
def _crop(input, starts, ends):
    len_starts, len_ends = len(starts), len(ends)
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/crop/{}:{}/starts:#{}/ends:#{}'.format(
        ctx[0].lower(), ctx[1], len_starts, len_ends)
    module = get_module(Crop,
                        key,
                        ctx,
                        len_starts=len_starts,
                        len_ends=len_ends)
    return module.forward(input, starts, ends)
Example #16
def _minimum(input, other, out=None):
    if not isinstance(input, Tensor):
        input = WrapScalar(input, 'float32', other._ctx)
        dtype = other._dtype
    elif not isinstance(other, Tensor):
        other = WrapScalar(other, 'float32', input._ctx)
        dtype = input._dtype
    else:
        dtype = input._dtype
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/minimum/{}:{}'.format(ctx[0].lower(), ctx[1])
    module = get_module(Minimum, key, ctx)
    return module.forward(input, other, out, dtype)
Example #17
def _reduce(input, operation, dim=None, keepdim=False, out=None):
    ctx = MakeContext(inputs=[input])
    if dim is None:
        dim = -1
        keepdim = False
    elif dim < 0:
        dim = CanonicalAxis(input, dim)
    key = 'torch/ops/{}/{}:{}/dim[{}]/keep_dims:{}'.format(
        operation.lower(), ctx[0].lower(), ctx[1], dim, int(keepdim))
    module = get_module(Reduce,
                        key,
                        ctx,
                        operation=operation,
                        axis=dim,
                        keep_dims=keepdim)
    return module.forward(input, out)
Example #18
def roi_align(feature,
              rois,
              pooled_h,
              pooled_w,
              spatial_scale,
              sampling_ratio=2):
    ctx = MakeContext(inputs=[feature])
    key = 'torch/ops/roi_align/{}:{}/pool_h:{}/pool_w:{}/' \
          'spatial_scale:{}/sampling_ratio:{}'.format(
              ctx[0].lower(), ctx[1], pooled_h, pooled_w,
              spatial_scale, sampling_ratio)
    module = get_module(RoIAlign,
                        key,
                        ctx,
                        pooled_h=pooled_h,
                        pooled_w=pooled_w,
                        spatial_scale=spatial_scale,
                        sampling_ratio=sampling_ratio)
    return module.forward(feature, rois)
Example #19
def normal_(self, mean=0, std=1):
    """Fill self tensor with the specified normal distribution.

    Parameters
    ----------
    mean : number
        The mean (mu) of the normal distribution.
    std : number
        The std (sigma) of the normal distribution.

    Returns
    -------
    vm.torch.Tensor
        The self tensor.

    """
    # TODO(PhyscalX): To support various dtypes, not only float32.
    arguments = {'mean': float(mean), 'std': float(std), 'dims': self.shape}
    inputs = []
    outputs = [self]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'RandomNormal', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)
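
A minimal usage sketch, assuming normal_ is bound as a Tensor method (as the self parameter suggests) and reusing the ones factory from this page:

w = ones(3, 3)
w.normal_(mean=0.0, std=0.02)  # refill w in place from N(0, 0.02^2)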
Example #20
def _resize_2d(input, op_type, dsize, fx, fy):
    if dsize is None:
        if fx < 0 or fy < 0:
            raise ValueError('Set both fx and fy if dsize is None.')
    elif len(dsize) != 2:
        raise ValueError('The dsize should be a list with 2 elements.')
    if dsize is None and (fy == -1.0 or fx == -1.0):
        raise RuntimeError('Specify either dsize or both fx and fy.')
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/{}/{}:{}/dsize:{}/fx:{}/fy:{}'.format(
        op_type.lower(), ctx[0].lower(), ctx[1],
        '2' if dsize else 'none', fx, fy)
    module = get_module(Resize2d,
                        key,
                        ctx,
                        op_type=op_type,
                        dsize=dsize,
                        fx=fx,
                        fy=fy)
    return module.forward(input, dsize)
Example #21
def uniform_(self, low=0, high=1):
    """Fill self tensor with the specified uniform distribution.

    Parameters
    ----------
    low : number
        The lower bound.
    high : number
        The upper bound.

    Returns
    -------
    vm.torch.Tensor
        The self tensor.

    """
    # TODO(PhyscalX): To support various dtypes, not only float32.
    arguments = {'low': float(low), 'high': float(high), 'dims': self.shape}
    inputs = []
    outputs = [self]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'RandomUniform', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)
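
A minimal usage sketch, under the same assumption that uniform_ is bound as a Tensor method:

w = ones(3, 3)
w.uniform_(low=-0.1, high=0.1)  # refill w in place from U(-0.1, 0.1)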
Example #22
def cat(seq, dim=0, out=None):
    """Concatenate the inputs along the given axis.

    Parameters
    ----------
    seq : tuple or list of vm.torch.Tensor
        The sequence of tensors to concatenate.
    dim : int
        The dim to concatenate.
    out : vm.torch.Tensor or None
        The optional output tensor.

    Returns
    -------
    vm.torch.Tensor
        The output tensor.

    """
    ctx = MakeContext(inputs=seq, outputs=[out] if out is not None else [])
    key = 'torch/ops/cat/{}:{}/dim:{}'.format(ctx[0].lower(), ctx[1], dim)
    module = get_module(Concat, key, ctx, axis=dim)
    return module.forward(seq, out)
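
A minimal usage sketch of cat, reusing the ones factory from this page:

a = ones(2, 3)
b = ones(2, 3)
c = cat([a, b], dim=0)  # concatenated along dim 0; shape (4, 3)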
Example #23
def _clamp(input, min=None, max=None, out=None):
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/clamp/{}:{}/min:{}/max:{}'.format(ctx[0].lower(), ctx[1],
                                                       min, max)
    module = get_module(Clamp, key, ctx, min=min, max=max)
    return module.forward(input, out)
Example #24
def _log(input, out=None):
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/log/{}:{}'.format(ctx[0].lower(), ctx[1])
    module = get_module(Log, key, ctx)
    return module.forward(input, out)
Example #25
def unsqueeze(input, dim, out=None):
    ctx = MakeContext(inputs=[input])
    # Use an explicit None check: 'dim if dim' would map dim=0 to 'None'.
    key = 'torch/ops/unsqueeze/{}:{}/dim:{}'.format(
        ctx[0].lower(), ctx[1], dim if dim is not None else 'None')
    module = get_module(UnSqueeze, key, ctx, dim=dim)
    return module.forward(input, out=out)