Example #1
    def __init__(self, n_dim: int, in_channels: int, out_channels: int,
                 pooling: bool = True, norm_layer: str = "Batch"):
        """

        Parameters
        ----------
        n_dim : int
            dimensionality of the input
        in_channels : int
            number of input channels
        out_channels : int
            number of output channels
        pooling : bool
            whether to apply pooling or not
        norm_layer : str
            the kind of normalization layer to use
        """
        super().__init__()

        self.n_dim = n_dim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling

        self.conv1 = _conv3x3(self.n_dim, self.in_channels,
                              self.out_channels)
        self.norm1 = NormNd(norm_layer, n_dim, self.out_channels)
        self.conv2 = _conv3x3(self.n_dim, self.out_channels,
                              self.out_channels)
        self.norm2 = NormNd(norm_layer, n_dim, self.out_channels)

        if self.pooling:
            self.pool = PoolingNd("Max", n_dim, 2)
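
A minimal 2-D usage sketch of the block above. Assumptions: for n_dim=2 the
Nd wrappers reduce to their torch.nn counterparts (NormNd("Batch", 2, c) to
BatchNorm2d, PoolingNd("Max", 2, 2) to MaxPool2d(2)), and the forward pass,
which is not shown here, applies conv -> norm -> ReLU twice before pooling:

import torch

# Hypothetical 2-D stand-in; the layer choices mirror the constructor above.
down_block = torch.nn.Sequential(
    torch.nn.Conv2d(3, 64, kernel_size=3, padding=1),   # _conv3x3(2, 3, 64)
    torch.nn.BatchNorm2d(64),                           # NormNd("Batch", 2, 64)
    torch.nn.ReLU(inplace=True),                        # assumed activation
    torch.nn.Conv2d(64, 64, kernel_size=3, padding=1),
    torch.nn.BatchNorm2d(64),
    torch.nn.ReLU(inplace=True),
    torch.nn.MaxPool2d(2))                              # PoolingNd("Max", 2, 2)
print(down_block(torch.randn(1, 3, 32, 32)).shape)      # torch.Size([1, 64, 16, 16])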
Example #2
    def __init__(self,
                 num_input_features: int,
                 growth_rate: int,
                 bn_size: int,
                 drop_rate: float,
                 n_dim: int = 2,
                 norm_type: str = "Batch"):
        super().__init__()
        self.add_module('norm1', NormNd(norm_type, n_dim, num_input_features))
        self.add_module('relu1', torch.nn.ReLU(inplace=True))
        self.add_module(
            'conv1',
            ConvNd(n_dim,
                   num_input_features,
                   bn_size * growth_rate,
                   kernel_size=1,
                   stride=1,
                   bias=False))
        self.add_module('norm2',
                        NormNd(norm_type, n_dim, bn_size * growth_rate))
        self.add_module('relu2', torch.nn.ReLU(inplace=True))
        self.add_module(
            'conv2',
            ConvNd(n_dim,
                   bn_size * growth_rate,
                   growth_rate,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias=False))
        self.drop_rate = drop_rate
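
This is the DenseNet bottleneck recipe: the 1x1 convolution expands the input
to bn_size * growth_rate channels, the 3x3 convolution reduces it to
growth_rate channels, and the enclosing dense block concatenates the result
to its input. A hedged 2-D sketch of the channel flow with plain torch.nn
stand-ins (norm/ReLU omitted for brevity):

import torch

num_input_features, bn_size, growth_rate = 64, 4, 32
x = torch.randn(1, num_input_features, 8, 8)
conv1 = torch.nn.Conv2d(num_input_features, bn_size * growth_rate,
                        kernel_size=1, bias=False)
conv2 = torch.nn.Conv2d(bn_size * growth_rate, growth_rate,
                        kernel_size=3, padding=1, bias=False)
new_features = conv2(conv1(x))
out = torch.cat([x, new_features], dim=1)
print(out.shape)   # torch.Size([1, 96, 8, 8]): 64 input + 32 new channels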
Example #3
    def __init__(self, n_dim: int, in_channels: int, out_channels: int,
                 merge_mode: str = 'concat', up_mode: str = 'transpose',
                 norm_layer: str = 'Batch',
                 padding_kwargs: dict = None):
        """

        Parameters
        ----------
        n_dim : int
            input dimensionality
        in_channels : int
            number of input channels
        out_channels : int
            number of output channels
        merge_mode : str
            how to merge the inputs
        up_mode : str
            how to do the upsampling
        norm_layer : str
            what kind of norm to use
        padding_kwargs : dict
            additional padding kwargs
        """
        super().__init__()

        if padding_kwargs is None:
            padding_kwargs = {}

        self.padding_kwargs = padding_kwargs

        self.n_dim = n_dim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.merge_mode = merge_mode
        self.up_mode = up_mode

        self.upconv = _upconv2x2(self.n_dim, self.in_channels,
                                 self.out_channels,
                                 mode=self.up_mode)

        if self.merge_mode == 'concat':
            self.conv1 = _conv3x3(
                self.n_dim,
                2 * self.out_channels,
                self.out_channels)
        else:
            # in the non-concat ('add') mode the inputs are summed, so conv1
            # sees out_channels input channels, the same as conv2
            self.conv1 = _conv3x3(self.n_dim,
                                  self.out_channels,
                                  self.out_channels)
        self.norm1 = NormNd(norm_layer, n_dim, self.out_channels)
        self.conv2 = _conv3x3(self.n_dim,
                              self.out_channels,
                              self.out_channels)
        self.norm2 = NormNd(norm_layer, n_dim, self.out_channels)
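
With merge_mode='concat' the upsampled map is concatenated with the skip
connection, so conv1 must accept 2 * out_channels input channels; in the
other ('add') mode the maps are summed and the count stays at out_channels.
A small shape check of the concat case with plain torch.nn stand-ins and
hypothetical sizes:

import torch

out_channels = 64
up = torch.randn(1, out_channels, 32, 32)     # output of the up-convolution
skip = torch.randn(1, out_channels, 32, 32)   # encoder feature map
merged = torch.cat([up, skip], dim=1)         # 2 * out_channels channels
conv1 = torch.nn.Conv2d(2 * out_channels, out_channels, 3, padding=1)
print(conv1(merged).shape)                    # torch.Size([1, 64, 32, 32])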
Example #4
    def _make_layer(self,
                    block: torch.nn.Module,
                    planes: int,
                    blocks: int,
                    stride: Union[int, Sequence[int]] = 1,
                    norm_layer: str = "Batch",
                    n_dim: int = 2):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = torch.nn.Sequential(
                _conv1x1(self.inplanes,
                         planes * block.expansion,
                         stride,
                         n_dim=n_dim),
                NormNd(norm_layer, n_dim, planes * block.expansion),
            )

        layers = [
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  norm_layer,
                  n_dim=n_dim)
        ]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      norm_layer=norm_layer,
                      n_dim=n_dim))

        return torch.nn.Sequential(*layers)
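
The projection shortcut is only built when the first block of the stage
changes resolution (stride != 1) or width (self.inplanes != planes *
block.expansion); self.inplanes is then updated so the remaining blocks
match. Worked bookkeeping for a Bottleneck-style block, where expansion == 4
is an assumption taken from the standard ResNet design:

inplanes, planes, stride, expansion = 64, 64, 1, 4
needs_downsample = stride != 1 or inplanes != planes * expansion
print(needs_downsample)        # True: 64 != 256, so a 1x1 projection is added
inplanes = planes * expansion  # the remaining blocks of the stage see 256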
Example #5
    def __init__(self,
                 inplanes: int,
                 planes: int,
                 stride: Union[int, Sequence[int]] = 1,
                 downsample: torch.nn.Module = None,
                 norm_layer: str = "Batch",
                 n_dim: int = 2):
        super().__init__()
        # Both self.conv2 and self.downsample layers downsample the input
        # when stride != 1
        self.conv1 = _conv1x1(inplanes, planes, n_dim=n_dim)
        self.bn1 = NormNd(norm_layer, n_dim, planes)
        self.conv2 = _conv3x3(planes, planes, stride, n_dim=n_dim)
        self.bn2 = NormNd(norm_layer, n_dim, planes)
        self.conv3 = _conv1x1(planes, planes * self.expansion, n_dim=n_dim)
        self.bn3 = NormNd(norm_layer, n_dim, planes * self.expansion)
        self.relu = torch.nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
Example #6
    def __init__(self,
                 inplanes: int,
                 planes: int,
                 stride: Union[int, Sequence[int]] = 1,
                 downsample: torch.nn.Module = None,
                 norm_layer: str = "Batch",
                 n_dim: int = 2,
                 reduction: int = 16):
        """
        Squeeze and Excitation Basic ResNet block

        Parameters
        ----------
        inplanes : int
            number of input channels
        planes : int
            number of intermediate channels
        stride : int or tuple
            stride of first convolution
        downsample : nn.Module
            downsampling in residual path
        norm_layer : str
            type of normalization layer
        n_dim : int
            dimensionality of convolution
        reduction : int
            reduction for squeeze and excitation layer
        """
        super().__init__()
        # Both self.conv1 and self.downsample layers downsample the input
        # when stride != 1
        self.conv1 = _conv3x3(inplanes, planes, stride, n_dim=n_dim)
        self.bn1 = NormNd(norm_layer, n_dim, planes)
        self.relu = torch.nn.ReLU(inplace=True)

        self.conv2 = _conv3x3(planes, planes, n_dim=n_dim)
        self.bn2 = NormNd(norm_layer, n_dim, planes)

        self.downsample = downsample
        self.stride = stride

        self.selayer = _SELayer(n_dim,
                                planes * self.expansion,
                                reduction=reduction)
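
_SELayer is referenced but not defined in these snippets. A minimal 2-D
sketch following the standard squeeze-and-excitation recipe of Hu et al.
(global average pool, two fully connected layers with a reduction
bottleneck, sigmoid gating); the actual _SELayer is assumed to behave
similarly:

import torch

class SELayer2d(torch.nn.Module):
    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(channels, channels // reduction),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(channels // reduction, channels),
            torch.nn.Sigmoid())

    def forward(self, x):
        w = self.fc(x.mean(dim=(2, 3)))   # squeeze: global average pool
        return x * w[:, :, None, None]    # excite: per-channel rescaling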
Example #7
    def __init__(self,
                 block: torch.nn.Module,
                 layers: Sequence[int],
                 num_classes: int,
                 in_channels: int,
                 zero_init_residual: bool = False,
                 norm_layer: str = "Batch",
                 n_dim: int = 2,
                 start_filts: int = 64):
        super().__init__()

        self.start_filts = start_filts
        self.inplanes = start_filts  # a plain int; deepcopy is unnecessary
        self.conv1 = ConvNd(n_dim,
                            in_channels,
                            self.inplanes,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)

        self.bn1 = NormNd(norm_layer, n_dim, self.inplanes)
        self.relu = torch.nn.ReLU(inplace=True)
        self.maxpool = PoolingNd("Max",
                                 n_dim=n_dim,
                                 kernel_size=3,
                                 stride=2,
                                 padding=1)

        num_layers = 0
        for idx, _layers in enumerate(layers):
            stride = 1 if idx == 0 else 2
            planes = min(self.start_filts * pow(2, idx), self.start_filts * 8)
            _local_layer = self._make_layer(block,
                                            planes,
                                            _layers,
                                            stride=stride,
                                            norm_layer=norm_layer,
                                            n_dim=n_dim)

            setattr(self, "layer%d" % (idx + 1), _local_layer)
            num_layers += 1

        self.num_layers = num_layers

        self.avgpool = PoolingNd("AdaptiveAvg", n_dim, 1)
        self.fc = torch.nn.Linear(self.inplanes, num_classes)
        self.reset_weights(zero_init_residual=zero_init_residual)
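
The per-stage width doubles at every stage but is capped at start_filts * 8,
reproducing the classic 64-128-256-512 progression. A quick check for the
default four-stage configuration:

start_filts = 64
print([min(start_filts * pow(2, idx), start_filts * 8) for idx in range(4)])
# -> [64, 128, 256, 512]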
Example #8
    def __init__(self,
                 num_input_features: int,
                 num_output_features: int,
                 n_dim: int = 2,
                 norm_type: str = "Batch"):
        super().__init__()
        self.add_module('norm', NormNd(norm_type, n_dim, num_input_features))
        self.add_module('relu', torch.nn.ReLU(inplace=True))
        self.add_module(
            'conv',
            ConvNd(n_dim,
                   num_input_features,
                   num_output_features,
                   kernel_size=1,
                   stride=1,
                   bias=False))
        self.add_module('pool', PoolingNd("AdaptiveAvg", n_dim, output_size=2))
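
In Example #13 the transition is always constructed with
num_output_features = num_features // 2, so the 1x1 convolution halves the
channel count between dense blocks before pooling. A hedged usage sketch
with hypothetical channel counts:

trans = _Transition(num_input_features=256, num_output_features=128)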
Example #9
    def __init__(self,
                 in_planes,
                 out_planes,
                 kernel_size=3,
                 stride=1,
                 groups=1,
                 n_dim=2,
                 norm_type="Batch"):
        padding = (kernel_size - 1) // 2

        super().__init__(
            ConvNd(n_dim,
                   in_planes,
                   out_planes,
                   kernel_size,
                   stride,
                   padding,
                   groups=groups,
                   bias=False),
            NormNd(norm_type, n_dim, out_planes),
            torch.nn.ReLU6(inplace=True))
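
padding = (kernel_size - 1) // 2 yields "same" padding for odd kernel sizes,
so at stride 1 the spatial size is preserved and only the stride changes it.
A quick check:

for kernel_size in (1, 3, 5, 7):
    print(kernel_size, (kernel_size - 1) // 2)   # (1, 0) (3, 1) (5, 2) (7, 3)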
Example #10
    def make_layers(cfg: Sequence[Union[int, str]], in_channels: int,
                    norm_type: str = None, n_dim: int = 2,
                    pool_type: str = "Max") -> torch.nn.Sequential:
        layers = []

        for v in cfg:
            if v == 'P':
                layers += [PoolingNd(pool_type, n_dim, kernel_size=2,
                                     stride=2)]
            else:
                _layers = [ConvNd(n_dim, in_channels, v, kernel_size=3,
                                  padding=1)]
                if norm_type is not None:
                    _layers.append(NormNd(norm_type, n_dim, v))

                _layers.append(torch.nn.ReLU(inplace=True))
                layers += _layers
                in_channels = v

        return torch.nn.Sequential(*layers)
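
A hedged usage sketch with a VGG-11-style configuration, where integers are
output channel counts and 'P' inserts a pooling layer; the concrete cfg
values are illustrative, and make_layers is assumed to be exposed as a
static helper:

cfg = [64, 'P', 128, 'P', 256, 256, 'P', 512, 512, 'P', 512, 512, 'P']
features = make_layers(cfg, in_channels=3, norm_type="Batch")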
Example #11
    def __init__(self,
                 inp,
                 oup,
                 stride,
                 expand_ratio,
                 n_dim=2,
                 norm_type="Batch"):
        super().__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pw
            layers.append(
                _ConvNormReLU(inp,
                              hidden_dim,
                              kernel_size=1,
                              n_dim=n_dim,
                              norm_type=norm_type))
        layers.extend([
            # dw
            _ConvNormReLU(hidden_dim,
                          hidden_dim,
                          stride=stride,
                          groups=hidden_dim,
                          n_dim=n_dim,
                          norm_type=norm_type),
            # pw-linear
            ConvNd(n_dim, hidden_dim, oup, 1, 1, 0, bias=False),
            NormNd(norm_type, n_dim, oup)
        ])
        self.conv = torch.nn.Sequential(*layers)
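
Channel flow for a hypothetical inp=32, oup=16, expand_ratio=6: the
pointwise expansion yields round(32 * 6) = 192 hidden channels, the
depthwise 3x3 keeps 192 (groups == channels), and the linear pointwise
projection reduces to 16. The identity shortcut is used only when
stride == 1 and inp == oup:

inp, oup, expand_ratio, stride = 32, 16, 6, 1
hidden_dim = int(round(inp * expand_ratio))    # 192
use_res_connect = stride == 1 and inp == oup   # False: 32 != 16, no shortcut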
Example #12
    def __init__(self,
                 in_channels: int,
                 channels: int,
                 stride: int,
                 cardinality: int,
                 width: int,
                 n_dim: int,
                 norm_layer: str,
                 reduction: int = 16):
        """
        Squeeze and Excitation ResNeXt Block

        Parameters
        ----------
        in_channels : int
            number of input channels
        channels : int
            number of intermediate channels
        stride : int
            stride of 3x3 convolution layer
        cardinality : int
            number of convolution groups
        width : int
            width of resnext block
        n_dim : int
            dimensionality of convolutions
        norm_layer : str
            type of normalization layer
        reduction : int
            reduction for se layer
        """
        super().__init__()
        out_channels = channels * self.expansion
        if cardinality == 1:
            rc = channels
        else:
            width_ratio = channels * (width / self.start_filts)
            rc = cardinality * math.floor(width_ratio)

        self.conv_reduce = ConvNd(n_dim,
                                  in_channels,
                                  rc,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0,
                                  bias=False)
        self.bn_reduce = NormNd(norm_layer, n_dim, rc)
        self.relu = torch.nn.ReLU(inplace=True)

        self.conv_conv = ConvNd(n_dim,
                                rc,
                                rc,
                                kernel_size=3,
                                stride=stride,
                                padding=1,
                                groups=cardinality,
                                bias=False)
        self.bn = NormNd(norm_layer, n_dim, rc)

        self.conv_expand = ConvNd(n_dim,
                                  rc,
                                  out_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0,
                                  bias=False)
        self.bn_expand = NormNd(norm_layer, n_dim, out_channels)

        self.shortcut = torch.nn.Sequential()

        if in_channels != out_channels or stride != 1:
            self.shortcut.add_module(
                'shortcut_conv',
                ConvNd(n_dim,
                       in_channels,
                       out_channels,
                       kernel_size=1,
                       stride=stride,
                       padding=0,
                       bias=False))
            self.shortcut.add_module('shortcut_bn',
                                     NormNd(norm_layer, n_dim, out_channels))

        self.selayer = _SELayer(n_dim, out_channels, reduction=reduction)
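
The grouped-convolution width follows the ResNeXt rule
rc = cardinality * floor(channels * width / start_filts). For the common
32x4d setting, with channels=64 and start_filts=64 as assumed values:

import math

cardinality, width, channels, start_filts = 32, 4, 64, 64
rc = cardinality * math.floor(channels * (width / start_filts))
print(rc)   # -> 128 channels in the grouped 3x3 convolution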
Example #13
    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 growth_rate: int = 32,
                 block_config: Sequence[int] = (6, 12, 24, 16),
                 num_init_features: int = 64,
                 bn_size: int = 4,
                 drop_rate: float = 0,
                 n_dim: int = 2,
                 pool_type: str = "Max",
                 norm_type: str = "Batch"):

        super().__init__()

        # First convolution
        self.features = torch.nn.Sequential(
            OrderedDict([
                ('conv0',
                 ConvNd(n_dim,
                        in_channels,
                        num_init_features,
                        kernel_size=7,
                        stride=2,
                        padding=3,
                        bias=False)),
                ('norm0', NormNd(norm_type, n_dim, num_init_features)),
                ('relu0', torch.nn.ReLU(inplace=True)),
                ('pool0',
                 PoolingNd(pool_type,
                           n_dim,
                           kernel_size=3,
                           stride=2,
                           padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate,
                                n_dim=n_dim,
                                norm_type=norm_type)

            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate

            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2,
                                    n_dim=n_dim,
                                    norm_type=norm_type)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final norm
        self.features.add_module('norm5', NormNd(norm_type, n_dim,
                                                 num_features))

        self.pool = PoolingNd("AdaptiveAvg", n_dim, 1)

        # Linear layer
        self.classifier = torch.nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, ConvNd):
                torch.nn.init.kaiming_normal_(m.conv.weight)
            elif isinstance(m, NormNd):
                if hasattr(m.norm, "weight") and m.norm.weight is not None:
                    torch.nn.init.constant_(m.norm.weight, 1)

                if hasattr(m.norm, "bias") and m.norm.bias is not None:
                    torch.nn.init.constant_(m.norm.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.constant_(m.bias, 0)
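
With the defaults (growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64) the channel bookkeeping reproduces DenseNet-121's
classifier width. A standalone check mirroring the loop above:

growth_rate, block_config, num_features = 32, (6, 12, 24, 16), 64
for i, num_layers in enumerate(block_config):
    num_features += num_layers * growth_rate   # each dense block adds layers * k
    if i != len(block_config) - 1:
        num_features //= 2                     # each transition halves the width
print(num_features)                            # -> 1024, the classifier's input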
Example #14
    def __init__(self,
                 n_dim: int,
                 norm_layer: str,
                 in_channels: int,
                 start_filts: int,
                 mode: str = '7x7'):
        """
        Defines different sequences of start convolutions

        Parameters
        ----------
        n_dim : int
            dimensionality of convolutions
        norm_layer : str
            type of normalization layer
        in_channels : int
            number of input channels
        start_filts : int
            number of channels after first convolution
        mode : str
            either '7x7' for the default configuration (a single 7x7
            convolution) or '3x3' for three consecutive 3x3 convolutions as
            proposed in https://arxiv.org/abs/1812.01187
        """
        super().__init__()
        self._in_channels = in_channels
        self._start_filts = start_filts
        self._mode = mode

        if mode == '7x7':
            self.convs = torch.nn.Sequential(*[
                ConvNd(n_dim,
                       in_channels,
                       self._start_filts,
                       kernel_size=7,
                       stride=2,
                       padding=3,
                       bias=False),
                NormNd(norm_layer, n_dim, self._start_filts)
            ])
        elif mode == '3x3':
            self.convs = torch.nn.Sequential(*[
                ConvNd(n_dim,
                       in_channels,
                       self._start_filts,
                       kernel_size=3,
                       stride=2,
                       padding=1,
                       bias=False),
                NormNd(norm_layer, n_dim, self._start_filts),
                ConvNd(n_dim,
                       self._start_filts,
                       self._start_filts,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       bias=False),
                NormNd(norm_layer, n_dim, self._start_filts),
                ConvNd(n_dim,
                       self._start_filts,
                       self._start_filts,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       bias=False),
                NormNd(norm_layer, n_dim, self._start_filts)
            ])
        else:
            raise ValueError('{} is not a supported mode!'.format(mode))
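
Both stems downsample once via the stride-2 first convolution; the '3x3'
mode replaces the 7x7 kernel with three stacked 3x3 convolutions per the
bag-of-tricks paper. A 2-D shape check with plain torch.nn stand-ins,
assuming ConvNd(n_dim=2, ...) matches Conv2d:

import torch

stem = torch.nn.Sequential(
    torch.nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False),
    torch.nn.BatchNorm2d(64),
    torch.nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False),
    torch.nn.BatchNorm2d(64),
    torch.nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False),
    torch.nn.BatchNorm2d(64))
print(stem(torch.randn(1, 3, 224, 224)).shape)   # torch.Size([1, 64, 112, 112])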