def gather(input, dim, index, out=None):
    """Gather the input values along the given axis.

    Note that this is a tensorflow-style gather: it takes a vector of
    indices, and values along the other dimensions are copied automatically.

    Parameters
    ----------
    input : vm.torch.Tensor
        The values.
    dim : int
        The dim to gather along.
    index : vm.torch.Tensor
        The indices.
    out : vm.torch.Tensor or None
        The optional output tensor.

    Returns
    -------
    vm.torch.Tensor
        The output tensor.

    """
    ctx = MakeContext(inputs=[input, index], outputs=[out] if out else [])
    key = 'torch/ops/gather/{}:{}/dim:{}'.format(ctx[0].lower(), ctx[1], dim)
    module = get_module(Gather, key, ctx, axis=dim)
    return module.forward(input, index, out)

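# Usage sketch for gather() (illustrative only; the index construction below is
# an assumption, not an API defined in this module):
#
#   idx = <some 1-D int32 vm.torch.Tensor containing [0, 2]>
#   y = gather(x, dim=1, index=idx)   # copies columns 0 and 2 of a 2-D x
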
def _rfundamental(input, value, op='RAdd', out=None):
    if not isinstance(value, Tensor):
        value = WrapScalar(value, input._dtype, input._ctx)
    ctx = MakeContext(inputs=[input, value])
    key = 'torch/ops/{}/{}:{}'.format(op.lower(), ctx[0].lower(), ctx[1])
    module = get_module(Fundamental, key, ctx, op_type=op)
    return module.forward(value, input, out)

def _permute(input, perms=None):
    ctx = MakeContext(inputs=[input])
    len_perms = len(perms) if perms else 0
    key = 'torch/ops/permute/{}:{}/n_dims:#{}'.format(
        ctx[0].lower(), ctx[1], len_perms)
    module = get_module(Permute, key, ctx, len_perms=len_perms)
    return module.forward(input, perms)

def _repeat(input, times):
    ctx = MakeContext(inputs=[input])
    len_times = len(times)
    key = 'torch/ops/repeat/{}:{}/n_times:#{}'.format(
        ctx[0].lower(), ctx[1], len_times)
    module = get_module(Repeat, key, ctx, len_times=len_times)
    return module.forward(input, times)

def ones(*sizes, **kwargs):
    """Return a float tensor with values of ``1``.

    Parameters
    ----------
    sizes : tuple, list or int
        The sizes indicating the shape of the output tensor.
    out : vm.torch.Tensor
        The optional output tensor.

    Returns
    -------
    vm.torch.FloatTensor
        The output tensor.

    """
    arguments = {'value': 1.0, 'dims': sizes}
    out = kwargs['out'] if 'out' in kwargs else None
    if out is None:
        out = LeafTensor(sizes, requires_grad=kwargs['requires_grad']
                         if 'requires_grad' in kwargs else False)
    inputs = []
    outputs = [out]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'Fill', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)

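# Usage sketch (illustrative only): sizes are passed positionally and
# ``requires_grad``/``out`` as keyword arguments, per the signature above.
#
#   a = ones(2, 3)                       # 2 x 3 tensor of ones
#   b = ones(2, 3, requires_grad=True)   # tracked leaf tensor of ones
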
def zeros_like(input, out=None, **kwargs):
    """Return a float tensor with values of ``0``, with the same shape as the input.

    Parameters
    ----------
    input : vm.torch.Tensor
        The tensor indicating the shape.
    out : vm.torch.Tensor
        The optional output tensor.

    Returns
    -------
    vm.torch.FloatTensor
        The output tensor.

    """
    if not hasattr(input, 'shape'):
        raise ValueError('Input does not have the shape attribute.')
    arguments = {'value': 0.0, 'dims': input.shape}
    if out is None:
        out = LeafTensor(input.shape, requires_grad=kwargs['requires_grad']
                         if 'requires_grad' in kwargs else False)
    inputs = []
    outputs = [out]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'Fill', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)

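# Usage sketch (illustrative only): the shape is taken from ``input.shape``,
# so any tensor exposing a ``shape`` attribute works as the reference.
#
#   ref = ones(4, 5)
#   z = zeros_like(ref)                  # 4 x 5 tensor of zeros
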
def randn(*sizes, **kwargs):
    """Return a float tensor with values drawn from the normal distribution N(0, 1).

    Parameters
    ----------
    sizes : tuple, list or int
        The sizes indicating the shape of the output tensor.
    out : vm.torch.Tensor
        The optional output tensor.

    Returns
    -------
    vm.torch.FloatTensor
        The output tensor.

    """
    arguments = {'mean': 0.0, 'std': 1.0, 'dims': sizes}
    out = kwargs['out'] if 'out' in kwargs else None
    if out is None:
        out = LeafTensor(sizes, requires_grad=kwargs['requires_grad']
                         if 'requires_grad' in kwargs else False)
    inputs = []
    outputs = [out]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'RandomNormal', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)

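# Usage sketch (illustrative only): draws from N(0, 1) into a new leaf tensor,
# or into ``out`` when it is provided.
#
#   noise = randn(128, 64)
#   randn(128, 64, out=noise)            # refill the same tensor in place
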
def _fill(input, shape, value):
    ctx = MakeContext(inputs=[input])
    len_shape = len(shape)
    key = 'torch/ops/fill/{}:{}/ndims:#{}/value:{}'.format(
        ctx[0].lower(), ctx[1], len_shape, value)
    module = get_module(Fill, key, ctx, len_shape=len_shape, value=value)
    return module.forward(input, shape)

def reshape(input, shape, shape_like=None):
    if shape_like is not None:
        shape = shape_like.shape
    ctx = MakeContext(inputs=[input])
    len_shape = len(shape)
    key = 'torch/ops/reshape/{}:{}/n_dims:#{}'.format(
        ctx[0].lower(), ctx[1], len_shape)
    module = get_module(Reshape, key, ctx, len_shape=len_shape)
    return module.forward(input, shape)

def _update(param, grad, op_type, slot, lr_mult=1.0, decay_mult=1.0):
    ctx = MakeContext(inputs=[param])
    key = 'torch/ops/{}/{}:{}/{}/{}'.format(
        op_type.lower(), ctx[0].lower(), ctx[1], slot, param.name)
    module = get_module(Update, key, ctx, op_type=op_type,
        lr_mult=lr_mult, decay_mult=decay_mult, slot=slot)
    return module.forward(param, grad)

def _type_to(input, dtype='float32', inplace=False):
    if dtype == input._dtype:
        return input
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/astype/{}:{}/dtype:{}/inplace:{}'.format(
        ctx[0].lower(), ctx[1], dtype, 'true' if inplace else 'false')
    module = get_module(AsType, key, ctx, dtype=dtype, inplace=inplace)
    with no_grad():
        return module.forward(input)

def _allreduce(grads):
    if not mpi.Is_Init():
        return
    if not isinstance(grads, (list, tuple)):
        grads = [grads]
    ctx = MakeContext(inputs=grads)
    mode = mpi.GetParallelMode() + '_ALLREDUCE'
    key = 'torch/ops/collective/{}:{}/{}'.format(
        ctx[0].lower(), ctx[1], mode.lower())
    module = get_module(Collective, key, ctx, mode=mode)
    return module.forward(grads)

def _fundamental(input, value, op='Add', out=None):
    if not isinstance(value, Tensor):
        if not isinstance(value, (int, float)):
            raise TypeError(
                'Type of value should be numerical, got {}.'.format(
                    type(value)))
        value = WrapScalar(value, input._dtype, input._ctx)
    ctx = MakeContext(inputs=[input, value])
    key = 'torch/ops/{}/{}:{}'.format(op.lower(), ctx[0].lower(), ctx[1])
    module = get_module(Fundamental, key, ctx, op_type=op)
    return module.forward(input, value, out)

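# Dispatch sketch for the arithmetic helpers (assumption: the public tensor
# operators route through _fundamental/_rfundamental; only the default op
# names 'Add'/'RAdd' are taken from the signatures above):
#
#   y = _fundamental(x, 2.5, op='Add')     # elementwise x + 2.5, scalar wrapped
#   y = _rfundamental(x, 2.5, op='RAdd')   # same value, reversed operand order
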
def roi_pool(feature, rois, pooled_h, pooled_w, spatial_scale):
    ctx = MakeContext(inputs=[feature])
    key = 'torch/ops/roi_pool/{}:{}/pool_h:{}/pool_w:{}/spatial_scale:{}'.format(
        ctx[0].lower(), ctx[1], pooled_h, pooled_w, spatial_scale)
    module = get_module(RoIPool, key, ctx, pooled_h=pooled_h,
        pooled_w=pooled_w, spatial_scale=spatial_scale)
    return module.forward(feature, rois)

def _crop(input, starts, ends):
    len_starts, len_ends = len(starts), len(ends)
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/crop/{}:{}/starts:#{}/ends:#{}'.format(
        ctx[0].lower(), ctx[1], len_starts, len_ends)
    module = get_module(Crop, key, ctx, len_starts=len_starts, len_ends=len_ends)
    return module.forward(input, starts, ends)

def _minimum(input, other, out=None):
    if not isinstance(input, Tensor):
        input = WrapScalar(input, 'float32', other._ctx)
        dtype = other._dtype
    elif not isinstance(other, Tensor):
        other = WrapScalar(other, 'float32', input._ctx)
        dtype = input._dtype
    else:
        dtype = input._dtype
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/minimum/{}:{}'.format(ctx[0].lower(), ctx[1])
    module = get_module(Minimum, key, ctx)
    return module.forward(input, other, out, dtype)

def _reduce(input, operation, dim=None, keepdim=False, out=None):
    ctx = MakeContext(inputs=[input])
    if dim is None:
        dim = -1
        keepdim = False
    elif dim < 0:
        dim = CanonicalAxis(input, dim)
    key = 'torch/ops/{}/{}:{}/dim[{}]/keep_dims:{}'.format(
        operation.lower(), ctx[0].lower(), ctx[1], dim, int(keepdim))
    module = get_module(Reduce, key, ctx, operation=operation,
        axis=dim, keep_dims=keepdim)
    return module.forward(input, out)

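# Reduction sketch (assumption: 'Sum' and 'Mean' are valid Reduce operations;
# only the dim handling shown here follows directly from the code above):
#
#   s = _reduce(x, 'Sum')                         # dim=None -> reduce all axes
#   m = _reduce(x, 'Mean', dim=-1, keepdim=True)  # negative dim is canonicalized
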
def roi_align(feature, rois, pooled_h, pooled_w,
              spatial_scale, sampling_ratio=2):
    ctx = MakeContext(inputs=[feature])
    key = 'torch/ops/roi_align/{}:{}/pool_h:{}/pool_w:{}/' \
          'spatial_scale:{}/sampling_ratio:{}'.format(
        ctx[0].lower(), ctx[1], pooled_h, pooled_w,
        spatial_scale, sampling_ratio)
    module = get_module(RoIAlign, key, ctx, pooled_h=pooled_h,
        pooled_w=pooled_w, spatial_scale=spatial_scale,
        sampling_ratio=sampling_ratio)
    return module.forward(feature, rois)

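# Usage sketch for the RoI operators (assumption: ``rois`` is an N x 5 tensor
# of [batch_index, x1, y1, x2, y2] boxes, the common detection-style layout):
#
#   pooled = roi_align(feature, rois, pooled_h=7, pooled_w=7,
#                      spatial_scale=1.0 / 16.0, sampling_ratio=2)
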
def normal_(self, mean=0, std=1):
    """Fill self tensor with values drawn from the specified normal distribution.

    Parameters
    ----------
    mean : numerical type
        The mean (mu) of the normal distribution.
    std : numerical type
        The std (sigma) of the normal distribution.

    Returns
    -------
    vm.torch.Tensor
        The self.

    """
    # TODO(PhyscalX): To support various dtypes, not only float32.
    arguments = {'mean': float(mean), 'std': float(std), 'dims': self.shape}
    inputs = []
    outputs = [self]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'RandomNormal', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)

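# In-place fill sketch (assumption: this function is bound as a Tensor method,
# as its ``self`` parameter suggests):
#
#   w = ones(3, 3)
#   w.normal_(mean=0, std=0.01)          # refill w from N(0, 0.01^2)
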
def _resize_2d(input, op_type, dsize, fx, fy):
    if dsize is None:
        if fx < 0 or fy < 0:
            raise ValueError('Set fx and fy if dsize is None.')
    else:
        if len(dsize) != 2:
            raise ValueError('The dsize should be a list with 2 elements.')
    if dsize is None and (fy == -1.0 or fx == -1.0):
        raise RuntimeError('Either dsize or both fx and fy should be specified.')
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/{}/{}:{}/dsize:{}/fx:{}/fy:{}'.format(
        op_type.lower(), ctx[0].lower(), ctx[1],
        '2' if dsize else 'none', fx, fy)
    module = get_module(Resize2d, key, ctx,
        op_type=op_type, dsize=dsize, fx=fx, fy=fy)
    return module.forward(input, dsize)

def uniform_(self, low=0, high=1):
    """Fill self tensor with values drawn from the specified uniform distribution.

    Parameters
    ----------
    low : numerical type
        The lower bound.
    high : numerical type
        The upper bound.

    Returns
    -------
    vm.torch.Tensor
        The self.

    """
    # TODO(PhyscalX): To support various dtypes, not only float32.
    arguments = {'low': float(low), 'high': float(high), 'dims': self.shape}
    inputs = []
    outputs = [self]
    ctx = MakeContext(inputs, outputs)
    meta = ('ONCE', 'RandomUniform', ctx)
    return RunOperator(inputs, outputs, meta, **arguments)

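# In-place fill sketch (assumption: bound as a Tensor method, like normal_):
#
#   b = ones(8)
#   b.uniform_(low=-0.5, high=0.5)       # refill b from U(-0.5, 0.5)
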
def cat(seq, dim=0, out=None):
    """Concatenate the inputs along the given axis.

    Parameters
    ----------
    seq : tuple or list of vm.torch.Tensor
        The sequence of tensors to concatenate.
    dim : int
        The dim to concatenate along.
    out : vm.torch.Tensor or None
        The optional output tensor.

    Returns
    -------
    vm.torch.Tensor
        The output tensor.

    """
    ctx = MakeContext(inputs=seq, outputs=[out] if out else [])
    key = 'torch/ops/cat/{}:{}/dim:{}'.format(ctx[0].lower(), ctx[1], dim)
    module = get_module(Concat, key, ctx, axis=dim)
    return module.forward(seq, out)

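# Usage sketch (illustrative only): tensors must match on all dims except the
# concatenation dim.
#
#   a, b = ones(2, 3), ones(4, 3)
#   c = cat([a, b], dim=0)               # -> 6 x 3 tensor
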
def _clamp(input, min=None, max=None, out=None):
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/clamp/{}:{}/min:{}/max:{}'.format(
        ctx[0].lower(), ctx[1], min, max)
    module = get_module(Clamp, key, ctx, min=min, max=max)
    return module.forward(input, out)

def _log(input, out=None):
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/log/{}:{}'.format(ctx[0].lower(), ctx[1])
    module = get_module(Log, key, ctx)
    return module.forward(input, out)

def unsqueeze(input, dim, out=None):
    ctx = MakeContext(inputs=[input])
    key = 'torch/ops/unsqueeze/{}:{}/dim:{}'.format(
        ctx[0].lower(), ctx[1], dim if dim else 'None')
    module = get_module(UnSqueeze, key, ctx, dim=dim)
    return module.forward(input, out=out)