Example #1
    def __init__(self, in_channels, out_channels, kernel_size, padding=0):
        super(ConvTBC, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _single(kernel_size)
        self.padding = _single(padding)

        self.weight = torch.nn.Parameter(torch.Tensor(
            self.kernel_size[0], in_channels, out_channels))
        self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
Example #2
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
                 cuda=False, init_weight=None, init_bias=None, clip_var=None):
        kernel_size = utils._single(kernel_size)
        stride = utils._single(stride)
        padding = utils._single(padding)
        dilation = utils._single(dilation)

        super(Conv1dGroupNJ, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, utils._pair(0), groups, bias, init_weight, init_bias, cuda, clip_var)
Example #3
def max_pool1d(g, input, kernel_size, stride, padding, dilation, ceil_mode):
    if ceil_mode:
        return _unimplemented("max_pool1d", "ceil_mode")
    if set(_single(dilation)) != {1}:
        return _unimplemented("max_pool1d", "dilation")
    if stride is None:
        stride = kernel_size
    r = g.op("MaxPool", input,
             kernel_shape_i=_single(kernel_size),
             pads_i=_single(padding) * 2,
             strides_i=_single(stride))
    return r, None
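
All of these snippets rely on the _single helper from torch.nn.modules.utils, which normalizes a scalar argument into a 1-tuple so the 1D layers can share code paths with their 2D/3D counterparts. A minimal demonstration (the helper is part of PyTorch; only this sketch is new):

from torch.nn.modules.utils import _single

print(_single(3))     # (3,)  -- a bare int is wrapped into a 1-tuple
print(_single((5,)))  # (5,)  -- an iterable is passed through as a tuple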
Example #4
 def symbolic(g, input, kernel_size, stride=None, padding=0, dilation=1,
              ceil_mode=False):
     from torch.onnx.symbolic import _unimplemented
     if ceil_mode:
         return _unimplemented("MaxPool1d", "ceil_mode")
     if set(_single(dilation)) != {1}:
         return _unimplemented("MaxPool1d", "dilation")
     if stride is None:
         stride = kernel_size
     r = g.op("MaxPool", input,
              kernel_shape_i=_single(kernel_size),
              pads_i=_single(padding),
              strides_i=_single(stride))
     return r, None
Example #5
 def symbolic(g,
              input,
              kernel_size,
              stride=None,
              padding=0,
              dilation=1,
              ceil_mode=False):
     if ceil_mode:
         raise RuntimeError("ceil_mode not supported in MaxPool1d")
     stride = stride or kernel_size
     n = g.appendNode(
         g.create("MaxPool", [input])
         .is_("kernel_shape", _single(kernel_size))
         .is_("pads", _single(padding))
         .is_("dilations", _single(dilation))
         .is_("strides", _single(stride)))
     return (n, None)
Example #6
def soft_pool1d(x, kernel_size=2, stride=None, force_inplace=False):
    if x.is_cuda and not force_inplace:
        return CUDA_SOFTPOOL1d.apply(x, kernel_size, stride)
    kernel_size = _single(kernel_size)
    if stride is None:
        stride = kernel_size
    else:
        stride = _single(stride)
    # Get input sizes
    _, c, d = x.size()
    # Create per-element exponential value sum : Tensor [b x 1 x d]
    e_x = torch.sum(torch.exp(x), dim=1, keepdim=True)
    # Apply mask to input and pool and calculate the exponential sum
    # Tensor: [b x c x d] -> [b x c x d']
    return F.avg_pool1d(x.mul(e_x), kernel_size,
                        stride=stride).mul_(sum(kernel_size)).div_(
                            F.avg_pool1d(e_x, kernel_size,
                                         stride=stride).mul_(sum(kernel_size)))
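
A minimal usage sketch for the CPU path above, assuming _single and F (torch.nn.functional) are imported as in the original module:

import torch

x = torch.randn(2, 4, 16)          # [batch, channels, length], on the CPU
y = soft_pool1d(x, kernel_size=2)  # exponentially weighted average pooling
print(y.shape)                     # torch.Size([2, 4, 8]); stride defaults to kernel_size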
Example #7
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              groups=1,
              bias=True,
              device=''):
     kernel_size = _single(kernel_size)
     stride = _single(stride)
     padding = _single(padding)
     dilation = _single(dilation)
     super(Conv1d_samepadding,
           self).__init__(in_channels, out_channels,
                          kernel_size, stride, padding, dilation, False,
                          _single(0), groups, bias, device)
Example #8
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              upsample=1,
              groups=1,
              bias=True,
              activation='linear',
              alpha=0.,
              residual=None):
     super(Conv1d, self).__init__(4, in_channels, out_channels,
                                  _single(kernel_size), _single(stride),
                                  _single(padding), _single(dilation),
                                  _single(upsample), groups, bias,
                                  activation, alpha, residual)
Example #9
def conv1d_input(input_size,
                 weight,
                 grad_output,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1):
    r"""
    Computes the gradient of conv1d with respect to the input of the convolution.
    This is the same as the 1D transposed convolution operator under the hood, but it
    requires the shape of the gradient w.r.t. the input to be specified explicitly.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weight tensor (out_channels x in_channels/groups x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(1,1,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1, requires_grad=True)
        >>> output = F.conv1d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_input = torch.autograd.grad(output, input, grad_output)
        >>> F.grad.conv1d_input(input.shape, weight, grad_output)

    """
    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)
    kernel_size = [weight.shape[2]]

    if input_size is None:
        raise ValueError("grad.conv1d_input requires specifying an input_size")

    grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
                                             padding, kernel_size, dilation)

    return torch.conv_transpose1d(grad_output, weight, None, stride, padding,
                                  grad_input_padding, groups, dilation)
Example #10
 def _conv_forward(self, spike: Tensor, weight: Tensor,
                   bias: Optional[Tensor]):
     if self.padding_mode != 'zeros':
         return spike_conv1d(
             F.pad(spike,
                   self._reversed_padding_repeated_twice,
                   mode=self.padding_mode), weight, bias, self.stride,
             _single(0), self.dilation, self.groups)
     return spike_conv1d(spike, weight, bias, self.stride, self.padding,
                         self.dilation, self.groups)
Example #11
 def __init__(self,
              in_channels: int,
              out_channels: int,
              kernel_size: _size_1_t,
              stride: _size_1_t = 1,
              padding: _size_1_t = 0,
              dilation: _size_1_t = 1,
              groups: int = 1,
              bias: bool = True,
              padding_mode: str = 'zeros'):
     nnq.Conv1d.__init__(self, in_channels, out_channels, kernel_size,
                         stride, padding, dilation, groups, bias,
                         padding_mode)
     # self.stride, self.padding, self.dilation are 2d tuple since
     # current quantized conv1d is using Conv2dPackedParams
      # TODO: we should fix this if we implement Conv1dPackedParams
     self._conv1d_stride = _single(self.stride[0])
     self._conv1d_padding = _single(self.padding[0])
     self._conv1d_dilation = _single(self.dilation[0])
Example #12
 def __init__(
     self,
     in_channels: int,
     out_channels: int,
     kernel_size: _size_1_t,
     stride: _size_1_t = 1,
     padding: _size_1_t = 0,
     dilation: _size_1_t = 1,
     groups: int = 1,
     bias: bool = True,
     padding_mode: str = 'zeros'  # TODO: refine this type
 ):
     kernel_size = _single(kernel_size)
     stride = _single(stride)
     padding = _single(padding)
     dilation = _single(dilation)
     super(Conv1d, self).__init__(
         in_channels, out_channels, kernel_size, stride, padding, dilation,
         False, _single(0), groups, bias, padding_mode)
Example #13
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 dropout=0,
                 padding=0,
                 weight_norm=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.kernel_size = _single(kernel_size)
        self.padding = _single(padding)

        self.weight = torch.nn.Parameter(
            torch.Tensor(self.kernel_size[0], in_channels, out_channels))
        self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
        self.weight_norm = weight_norm
        self.reset_parameters()
Example #14
def soft_pool1d(x, kernel_size=2, stride=None, force_inplace=False):
    if x.is_cuda and not force_inplace:
        x = CUDA_SOFTPOOL1d.apply(x, kernel_size, stride)
        # Replace NaNs if found
        if torch.isnan(x).any():
            return torch.nan_to_num(x)
        return x
    kernel_size = _single(kernel_size)
    if stride is None:
        stride = kernel_size
    else:
        stride = _single(stride)
    # Get input sizes
    _, c, d = x.size()
    # Per-element exponential values: Tensor [b x c x d]
    e_x = torch.exp(x)
    # Apply mask to input and pool and calculate the exponential sum
    # Tensor: [b x c x d] -> [b x c x d']
    return F.avg_pool1d(x.mul(e_x), kernel_size, stride=stride).div_(F.avg_pool1d(e_x, kernel_size, stride=stride))
Example #15
def _hier_dequantizer_pact(module):
    if module.__class__.__name__ == 'PACT_Conv2d':
        W = module.weight.data
        try:
            b = module.bias.data
        except AttributeError:
            b = None
        module = torch.nn.Conv2d(
            module.in_channels,
            module.out_channels,
            _single(module.kernel_size),
            stride=_single(module.stride),
            padding=_single(module.padding),
            dilation=_single(module.dilation),
            groups=module.groups,
            bias=True if module.bias is not None else False
        )
        module.weight.data = W.clone()
        if b is not None:
            module.bias.data = b.clone()
        return module
    if module.__class__.__name__ == 'PACT_Linear':
        W = module.weight.data
        try:
            b = module.bias.data
        except AttributeError:
            b = None
        module = torch.nn.Linear(
            module.in_features,
            module.out_features,
            bias=True if module.bias is not None else False
        )
        module.weight.data = W.clone()
        if b is not None:
            module.bias.data = b.clone()
        return module
    elif module.__class__.__name__ == 'PACT_Act':
        module = torch.nn.ReLU()
        return module
    else:
        for n,m in module.named_children():
            module._modules[n] = _hier_dequantizer_pact(m)
        return module
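
The dequantizer dispatches purely on class names, so its effect can be sketched with a hypothetical stand-in module; PACT_Act below is a dummy defined only for this illustration:

import torch

class PACT_Act(torch.nn.Module):  # hypothetical stand-in, matched by name only
    def forward(self, x):
        return x.clamp(min=0.0)

model = torch.nn.Sequential(torch.nn.Linear(8, 8), PACT_Act())
model = _hier_dequantizer_pact(model)
print(model)  # the PACT_Act child has been replaced by nn.ReLU()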
Example #16
 def symbolic(g,
              input,
              kernel_size,
              stride=None,
              padding=0,
              dilation=1,
              ceil_mode=False):
     from torch.onnx.symbolic import _unimplemented
     if ceil_mode:
         return _unimplemented("MaxPool1d", "ceil_mode")
     if stride is None:
         stride = kernel_size
     r = g.op("MaxPool",
              input,
              kernel_shape_i=_single(kernel_size),
              pads_i=_single(padding),
              dilations_i=_single(dilation),
              strides_i=_single(stride))
     return r, None
Example #17
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              padding=0,
              output_padding=0,
              groups=1,
              bias=True,
              dilation=1):
     kernel_size = _single(kernel_size)
     stride = _single(stride)
     padding = _single(padding)
     dilation = _single(dilation)
     output_padding = _single(output_padding)
     super(ConvTranspose1d,
           self).__init__(in_channels, out_channels, kernel_size, stride,
                          padding, dilation, True, output_padding, groups,
                          bias)
Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size_2d,
                 unit_size,
                 stride=1,
                 dilation=1,
                 groups=1,
                 bias=True):
        '''
        in_channels:  number of channels of the input feature,
        out_channels: the number of channels of the output feature,
        kernel_size_2d: kernel size of 2D convolution
        unit_size: the width of 2D feature map
        '''
        super(TALayer, self).__init__()
        assert in_channels % groups == 0, \
            'in_channels {} is not divisible by groups {}'.format(
                in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} is not divisible by groups {}'.format(
                out_channels, groups)
        assert unit_size >= kernel_size_2d[1]
        self.unit_size = unit_size

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size_2d = kernel_size_2d
        equiv_kernel = kernel_size_2d[0] * kernel_size_2d[1]
        # the kernel size of 2D deformable convolution
        self.kernel_size = [1, equiv_kernel]
        self.stride = _pair(stride)
        self.padding = [0, (equiv_kernel - 1) // 2]
        self.dilation = _pair(dilation)

        self.with_bias = bias

        self.groups = groups
        # enable compatibility with nn.Conv2d
        self.transposed = False
        self.output_padding = _single(0)

        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // self.groups,
                         *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.rand([out_channels]))
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / float(np.sqrt(fan_in))
            nn.init.uniform_(self.bias, -bound, bound)

        self.reset_parameters()

        self.base_offset = self.get_base_offset(kernel_size_2d,
                                                unit_size).cuda()
Example #19
 def __init__(
         self,
         # Conv1d args
         in_channels,
         out_channels,
         kernel_size,
         stride=1,
         padding=0,
         dilation=1,
         groups=1,
         bias=None,
         padding_mode='zeros',
         # BatchNorm1d args
         # num_features: out_channels
         eps=1e-05,
         momentum=0.1,
         # affine: True
         # track_running_stats: True
         # Args for this module
         freeze_bn=False,
         qconfig=None):
     kernel_size = _single(kernel_size)
     stride = _single(stride)
     padding = _single(padding)
     dilation = _single(dilation)
     _ConvBnNd.__init__(self,
                        in_channels,
                        out_channels,
                        kernel_size,
                        stride,
                        padding,
                        dilation,
                        False,
                        _single(0),
                        groups,
                        bias,
                        padding_mode,
                        eps,
                        momentum,
                        freeze_bn,
                        qconfig,
                        dim=1)
Example #20
def pwm_conv1d_input(input,
                     weight,
                     bg_tensor,
                     grad_output,
                     stride=1,
                     padding=0,
                     dilation=1,
                     groups=1):
    """
    Gradient of the PWM-based convolution with respect to its input.

    Parameters
    ----------
    input : tensor
        input tensor
    weight : tensor
        weight tensor
    bg_tensor : tensor
        background probabilities tensor
    grad_output : tensor
        Gradient output from the backward pass

    Returns
    -------
    Calculated gradients

    """
    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)
    kernel_size = [weight.shape[2]]
    input_size = input.shape

    if input_size is None:
        raise ValueError("grad.conv1d_input requires specifying an input_size")

    grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
                                             padding, kernel_size)

    pwm = weight.div(bg_tensor).log2()
    return torch.conv_transpose1d(grad_output, pwm, None, stride, padding,
                                  grad_input_padding, groups, dilation)
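
The only difference from the plain conv1d_input above is that the gradient flows through weight.div(bg_tensor).log2(), the standard position-weight-matrix log-odds transform. A minimal sketch of that transform alone, assuming a uniform background over four channels (e.g. DNA bases):

import torch

weight = torch.softmax(torch.randn(1, 4, 5), dim=1)  # per-position probabilities
bg = torch.full_like(weight, 0.25)                   # uniform background
pwm = weight.div(bg).log2()                          # log2 odds ratio, as in the code
print(pwm.shape)                                     # torch.Size([1, 4, 5])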
Example #21
 def __init__(self,
              process,
              thresholds,
              quant_levels,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              groups=1,
              bias=True):
     kernel_size = _single(kernel_size)
     stride = _single(stride)
     padding = _single(padding)
     dilation = _single(dilation)
     super(StochasticConv1d,
           self).__init__(process, thresholds, quant_levels, in_channels,
                          out_channels, kernel_size, stride, padding,
                          dilation, False, _single(0), groups, bias)
Example #22
    def __init__(self, in_channels, out_channels, kernel_size, padding=0):
        super(ConvGLUTest, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _single(kernel_size)
        self.padding = _single(padding)
        #self.conv = nn.Conv_TBC()

        self.weight = torch.nn.Parameter(
            torch.Tensor(self.kernel_size[0], in_channels, out_channels))
        self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
        cnt = 0
        for i in range(self.kernel_size[0]):
            for j in range(in_channels):
                for k in range(out_channels):
                    self.weight[i][j][k] = manual_init[cnt]
                    cnt = cnt + 1
        for i in range(out_channels):
            self.bias[i] = manual_init[cnt]
            cnt = cnt + 1
Example #23
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              sigma_prior,
              stride=1,
              padding=0,
              dilation=1,
              groups=1,
              bias=True):
     kernel_size = _single(kernel_size)
     stride = _single(stride)
     padding = _single(padding)
     dilation = _single(dilation)
     super(bb_Conv1d, self).__init__(in_channels, out_channels, kernel_size,
                                     stride, padding, dilation, False,
                                     _single(0), groups, bias)
     self.lpw = 0
     self.lqw = 0
     self.sigma_prior = sigma_prior
Example #24
def avg_pool1d(g, input, kernel_size, stride, padding, ceil_mode,
               count_include_pad):
    if ceil_mode:
        #return _unimplemented("avg_pool1d", "ceil_mode")
        ceil_number = 1
    else:
        ceil_number = 0
    if not stride:
        stride = kernel_size
    if not count_include_pad:
        return _unimplemented("avg_pool1d", "count_include_pad")
    # Since we don't need to stay format-compatible with other frameworks,
    # extra parameters can be added to the ONNX node freely.
    """
       The first argument to g.op is the node name, followed by the input; the
       remaining parameters and their order are arbitrary, as long as they match
       the logic in the onnx2caffe tool.
    """
    return g.op("AveragePool1d",
                input,
                kernel_shape_i=_single(kernel_size),
                pads_i=_single(padding),
                strides_i=_single(stride))
Example #25
 def forward(self, input: torch.Tensor) -> torch.Tensor:
     flipped_weight = torch.flip(self.weight, dims=(2, ))
     sym_weight = (self.weight + flipped_weight) / 2.
     if self.padding_mode != 'zeros':
         return F.conv1d(
             F.pad(input,
                   self._reversed_padding_repeated_twice,
                   mode=self.padding_mode), sym_weight, self.bias,
             self.stride, _single(0), self.dilation, self.groups)
     return F.conv1d(input, sym_weight, self.bias, self.stride,
                     self.padding, self.dilation, self.groups)
Example #26
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, boundary_cond='periodic'):

        super(DynamicConv1d, self).__init__()

        self.kernel_size = _single(kernel_size)
        self.stride = _single(stride) # not implemented
        self.padding = _single(padding)
        self.dilation = _single(dilation) # not implemented

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.boundary_cond = boundary_cond

        if self.padding[0] > 0 and boundary_cond == 'periodic':
            assert self.padding[0] == int((self.kernel_size[0]-1)/2)
            self.pad = PeriodicPad1d(self.padding[0], dim=-2)
        else:
            self.pad = None
Example #27
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              groups=1,
              bias=True,
              padding_mode='zeros',
              enable_BF16=False):
     kernel_size = _single(kernel_size)
     stride = _single(stride)
     padding = _single(padding)
     dilation = _single(dilation)
     self.enable_BF16 = enable_BF16
     super(Conv1dOpti,
           self).__init__(in_channels, out_channels,
                          kernel_size, stride, padding, dilation, False,
                          _single(0), groups, bias, padding_mode)
Example #28
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        kernel_size = _pair_from_first(kernel_size)
        stride = _pair_from_first(stride)
        padding = _pair_from_first(padding)
        dilation = _pair_from_first(dilation)

        super(Conv1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _single(0), groups, bias, padding_mode)
Example #29
    def change_context_window(self, context_window: int):
        """
        Update the context window of the SqueezeExcitation module, in-place if possible.

        Will update the pooling layer to either nn.AdaptiveAvgPool1d() (for global SE) or nn.AvgPool1d()
        (for limited context SE).

        If only the context window changes while the block still uses a limited
        SE context, the existing nn.AvgPool1d() instance is updated in place.

        Args:
            context_window: An integer representing the number of input timeframes that will be used
                to compute the context. Each timeframe corresponds to a single window stride of the
                STFT features.

                Say the window_stride = 0.01s, then a context window of 128 represents 128 * 0.01 s
                of context to compute the Squeeze step.
        """
        if hasattr(self, 'context_window'):
            logging.info(f"Changing Squeeze-Excitation context window from {self.context_window} to {context_window}")

        self.context_window = int(context_window)

        if self.context_window <= 0:
            if PYTORCH_QUANTIZATION_AVAILABLE and self._quantize:
                if not isinstance(self.pool, quant_nn.QuantAdaptiveAvgPool1d):
                    self.pool = quant_nn.QuantAdaptiveAvgPool1d(1)  # context window = T

            elif not PYTORCH_QUANTIZATION_AVAILABLE and self._quantize:
                raise ImportError(
                    "pytorch-quantization is not installed. Install from "
                    "https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
                )

            else:
                if not isinstance(self.pool, nn.AdaptiveAvgPool1d):
                    self.pool = nn.AdaptiveAvgPool1d(1)  # context window = T
        else:
            if PYTORCH_QUANTIZATION_AVAILABLE and self._quantize:
                if not isinstance(self.pool, quant_nn.QuantAvgPool1d):
                    self.pool = quant_nn.QuantAvgPool1d(self.context_window, stride=1)

            elif not PYTORCH_QUANTIZATION_AVAILABLE and self._quantize:
                raise ImportError(
                    "pytorch-quantization is not installed. Install from "
                    "https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
                )

            else:
                if not isinstance(self.pool, nn.AvgPool1d):
                    self.pool = nn.AvgPool1d(self.context_window, stride=1)
                else:
                    # update the context window
                    self.pool.kernel_size = _single(self.context_window)
Example #30
 def forward(ctx, input, kernel=2, stride=None):
     no_batch = False
     if len(input.size()) == 2:
         no_batch = True
         input.unsqueeze_(0)
     B, C, D = input.size()
     kernel = _single(kernel)
     if stride is None:
         stride = kernel
     else:
         stride = _single(stride)
     oD = D // stride[0]
     output = input.new_zeros((B, C, oD))
     softpool_cuda.forward_1d(input, kernel, stride, output)
     ctx.save_for_backward(input)
     ctx.kernel = kernel
     ctx.stride = stride
     if no_batch:
         return output.squeeze_(0)
     return output
Example #31
 def forward(ctx, input, kernel=2, stride=None):
     # Create contiguous tensor (if tensor is not contiguous)
     no_batch = False
     if len(input.size()) == 2:
         no_batch = True
         input.unsqueeze_(0)
     B, C, D = input.size()
     kernel = _single(kernel)
     if stride is None:
         stride = kernel
     else:
         stride = _single(stride)
     oD = (D - kernel[0]) // stride[0] + 1
     output = input.new_zeros((B, C, oD))
     softpool_cuda.forward_1d(input.contiguous(), kernel, stride, output)
     ctx.save_for_backward(input)
     ctx.kernel = kernel
     ctx.stride = stride
     if no_batch:
         return output.squeeze_(0)
     return output
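
Note that this variant sizes its output as (D - kernel[0]) // stride[0] + 1, whereas the otherwise-identical Example #30 uses D // stride[0]. Plain arithmetic shows the two agree when kernel equals stride (the default) but diverge for overlapping windows:

D, k, s = 10, 2, 2
print(D // s, (D - k) // s + 1)  # 5 5  -- identical when kernel == stride
D, k, s = 10, 3, 2
print(D // s, (D - k) // s + 1)  # 5 4  -- diverges once kernel > stride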
Example #32
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding='SAME',
                 dilation=1,
                 bias=True):
        implied_kernel_size = kernel_size // 2 + 1
        padding = _single(self.same_padding(
            kernel_size, dilation)) if padding == 'SAME' else _single(
                int(padding))
        kernel_size = _single(kernel_size)
        dilation = _single(dilation)

        self.mask = t.ones(out_channels, in_channels, *kernel_size).byte()
        self.mask[:, :, :implied_kernel_size] = t.zeros(
            out_channels, in_channels, implied_kernel_size)

        super(MaskedConv1d,
              self).__init__(in_channels, out_channels, kernel_size, 1,
                             padding, dilation, False, _single(0), 1, bias)
Example #33
    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            kernel_size: Union[int, Tuple],
            stride: Union[int, Tuple] = 1,
            padding: Union[int, Tuple] = 0,
            dilation: Union[int, Tuple] = 1,
            groups: int = 1,
            bias: bool = True,
            padding_mode: str = 'zeros',
            rpu_config: Optional[RPUConfigAlias] = None,
            realistic_read_write: bool = False,
            weight_scaling_omega: Optional[float] = None,
    ):
        # pylint: disable=too-many-arguments
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)

        if dilation != _single(1):
            raise ValueError('Only dilation = 1 is supported')

        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,  # type: ignore
            False, _single(0), groups, bias, padding_mode,
            rpu_config, realistic_read_write, weight_scaling_omega
        )

        self.tensor_view = (-1, 1)
Example #34
    def _output_padding(self,
                        input: Tensor,
                        output_size: Optional[List[int]],
                        stride: List[int],
                        padding: List[int],
                        kernel_size: List[int],
                        dilation: Optional[List[int]] = None) -> List[int]:
        # Input format: [N, B, Cin, ...]
        if output_size is None:
            ret = _single(
                self.output_padding)  # convert to a list if it was not already
        else:
            # Given the input format as [N, B, Cin, ...], we need to exclude the first 3 items
            # This is modified from the original code.
            k = input.dim() - 3
            if len(output_size) == k + 3:
                output_size = output_size[3:]
            if len(output_size) != k:
                # Check that output_size gives either just the conv dimensions
                # (1d, 2d, etc.) or the whole expected output shape (with B),
                # e.g. 2d + 3 = 5
                raise ValueError(
                    "output_size must have {} or {} elements (got {})".format(
                        k, k + 3, len(output_size)))

            min_sizes = torch.jit.annotate(List[int], [])
            max_sizes = torch.jit.annotate(List[int], [])
            for d in range(k):
                # This is modified for HFTA.
                dim_size = ((input.size(d + 3) - 1) * stride[d] -
                            2 * padding[d] +
                            (dilation[d] if dilation is not None else 1) *
                            (kernel_size[d] - 1) + 1)
                min_sizes.append(dim_size)
                max_sizes.append(min_sizes[d] + stride[d] - 1)

            for i in range(len(output_size)):
                size = output_size[i]
                min_size = min_sizes[i]
                max_size = max_sizes[i]
                if size < min_size or size > max_size:
                    raise ValueError((
                        "requested an output size of {}, but valid sizes range "
                        "from {} to {} (for an input of {})").format(
                            output_size, min_sizes, max_sizes,
                            input.size()[3:]))

            res = torch.jit.annotate(List[int], [])
            for d in range(k):
                res.append(output_size[d] - min_sizes[d])

            ret = res
        return ret
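
The dim_size expression above is the standard minimum output length of a transposed convolution. A quick sanity check against stock nn.ConvTranspose1d (independent of the HFTA-specific input format; the numbers are illustrative only):

import torch
import torch.nn as nn

# min output length = (L - 1) * stride - 2 * padding + dilation * (k - 1) + 1
L, k, stride, padding = 10, 3, 2, 1
min_size = (L - 1) * stride - 2 * padding + (k - 1) + 1  # dilation = 1, so 19

conv_t = nn.ConvTranspose1d(1, 1, k, stride=stride, padding=padding)
out = conv_t(torch.randn(1, 1, L))
print(out.shape[-1] == min_size)  # True: output_padding defaults to 0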
Example #35
    def forward(ctx, input, output_size):
        if input.dim() != 3:
            raise ValueError('expected 3D input (got {}D input)'
                             .format(input.dim()))

        ctx.output_size = _single(output_size)
        input2d = input.unsqueeze(2)    # size = N*C*1*L
        backend = type2backend[type(input)]
        output = input2d.new()
        ctx.save_for_backward(input)
        backend.SpatialAdaptiveAveragePooling_updateOutput(
            backend.library_state,
            input2d, output,
            ctx.output_size[0], 1)
        output = output.squeeze(2)
        return output
Example #36
    def forward(ctx, input, output_size):
        if input.dim() != 3:
            raise ValueError('expected 3D input (got {}D input)'
                             .format(input.dim()))

        ctx.output_size = _single(output_size)
        input2d = input.unsqueeze(2)    # size = N*C*1*L
        backend = type2backend[type(input)]
        indices, output = input2d.new().long(), input2d.new()
        backend.SpatialAdaptiveMaxPooling_updateOutput(backend.library_state,
                                                       input2d, output, indices,
                                                       ctx.output_size[0], 1)
        indices = indices.squeeze(2)
        output = output.squeeze(2)
        ctx.save_for_backward(input, indices)
        ctx.mark_non_differentiable(indices)
        return output, indices
Example #37
 def __init__(self, dim, in_channels, out_channels, kernel_size, stride,
              padding, dilation, upsample, groups, bias, activation, alpha,
              residual):
     nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels,
                                      kernel_size, stride, padding, dilation,
                                      False, _single(0), groups, bias)
     Quantizable.__init__(self, self.weight)
     self.activation = activation
     self.alpha = alpha
     self.upsample = upsample
     self.dim = dim
     self.weight.data = self.weight.data.permute(*to_chwn_idx(self.dim))
     self.residual = residual
Example #38
 def __init__(self, in_channels, out_channels, kernel_size, stride=1,
              padding=0, dilation=1, upsample=1, groups=1, bias=True,
              activation='linear', alpha=0., residual=None):
     super(Conv1d, self).__init__(4, in_channels, out_channels,
                                  _single(kernel_size), _single(stride),
                                  _single(padding), _single(dilation),
                                  _single(upsample), groups, bias,
                                  activation, alpha, residual)
Example #39
 def __init__(self, kernel_size, stride=1, padding=0):
     super(AvgPool1d, self).__init__(_single(kernel_size), 'avg', _single(stride), _single(padding))
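
The stock nn.AvgPool1d performs the same _single normalization internally, so an int kernel/stride/padding behaves exactly like its 1-tuple form; a quick usage sketch:

import torch
import torch.nn as nn

pool = nn.AvgPool1d(kernel_size=3, stride=2, padding=1)
y = pool(torch.randn(1, 2, 10))
print(y.shape)  # torch.Size([1, 2, 5]): floor((10 + 2*1 - 3) / 2) + 1 = 5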