Example #1
0
def conv1d(
    input,
    pointwise,
    spatial,
    bias=None,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    """Depthwise-separable 1d convolution.

    Applies a pointwise (kernel-size-1) convolution with ``pointwise`` and
    ``bias``, then a per-channel spatial convolution with ``spatial`` —
    stride/padding/dilation affect only the spatial stage.
    """
    # Accept the spatial weight wrapped in a one-element container.
    if not isinstance(spatial, Tensor):
        spatial = spatial[0]
    # Normalize scalar hyper-parameters to 1-tuples.
    stride, padding, dilation = (single(v) for v in (stride, padding, dilation))
    mixed = F.conv1d(input, pointwise, bias, 1, 0, 1, groups)
    # groups == channel count makes the second stage depthwise.
    return F.conv1d(mixed, spatial, None, stride, padding, dilation,
                    mixed.shape[1])
Example #2
0
    def _to_tuple(self, module: nn.Module, val: Union[Tuple[int],
                                                      int]) -> Tuple[int]:
        if isinstance(val, tuple):
            return val

        module_name = module.__class__.__name__.lower()
        if "1d" in module_name:
            return single(val)
        elif "2d" in module_name:
            return double(val)
        elif "3d" in module_name:
            return triple(val)
        else:
            raise ValueError(
                f"Couldn't infer tuple size for class {module.__class__.__name__}. "
                "Please pass an explicit tuple.")
Example #3
0
    # Resolve the effective output_padding for a transposed convolution.
    # NOTE(review): the `# type:` comment and `torch.jit.annotate` calls are
    # kept verbatim — they appear to be load-bearing for TorchScript.
    def _output_padding(self, input, output_size, stride, padding,
                        kernel_size):
        # type: (Tensor, Optional[List[int]], List[int], List[int], List[int]) -> List[int]
        if output_size is None:
            # No target size requested: use the module's configured value,
            # normalized through single().
            ret = single(
                self.output_padding)  # converting to list if was not already
        else:
            # Number of spatial dims; input is assumed (N, C, *spatial).
            k = input.dim() - 2
            # output_size may optionally include the leading (N, C) dims.
            if len(output_size) == k + 2:
                output_size = output_size[2:]
            if len(output_size) != k:
                raise ValueError(
                    "output_size must have {} or {} elements (got {})".format(
                        k, k + 2, len(output_size)))

            # For each spatial dim d, the smallest reachable output size is
            # (in - 1) * stride - 2 * padding + kernel, and varying the
            # output padding over [0, stride - 1] reaches up to
            # min_sizes[d] + stride[d] - 1.
            min_sizes = torch.jit.annotate(List[int], [])
            max_sizes = torch.jit.annotate(List[int], [])
            for d in range(k):
                dim_size = (input.size(d + 2) -
                            1) * stride[d] - 2 * padding[d] + kernel_size[d]
                min_sizes.append(dim_size)
                max_sizes.append(min_sizes[d] + stride[d] - 1)

            # Reject any requested size outside the reachable
            # [min_size, max_size] range for its dimension.
            for i in range(len(output_size)):
                size = output_size[i]
                min_size = min_sizes[i]
                max_size = max_sizes[i]
                if size < min_size or size > max_size:
                    raise ValueError((
                        "requested an output size of {}, but valid sizes range "
                        "from {} to {} (for an input of {})").format(
                            output_size, min_sizes, max_sizes,
                            input.size()[2:]))

            # output_padding is the excess of the requested size over the
            # minimum reachable size, per dimension.
            res = torch.jit.annotate(List[int], [])
            for d in range(k):
                res.append(output_size[d] - min_sizes[d])

            ret = res
        return ret